[llvm] a2a07e8 - [RISCV] Fold store of vmv.x.s to a vse with VL=1.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Mon Sep 27 09:54:53 PDT 2021


Author: Craig Topper
Date: 2021-09-27T09:54:46-07:00
New Revision: a2a07e8db3bf64440f24d9d6408df214886826de

URL: https://github.com/llvm/llvm-project/commit/a2a07e8db3bf64440f24d9d6408df214886826de
DIFF: https://github.com/llvm/llvm-project/commit/a2a07e8db3bf64440f24d9d6408df214886826de.diff

LOG: [RISCV] Fold store of vmv.x.s to a vse with VL=1.

Storing directly from the vector unit avoids routing the value through the
scalar unit, which can cost a loss of decoupling on cores with decoupled
scalar and vector units.
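
As a rough sketch of the effect (illustrative assembly only; the register and
offset choices are arbitrary), a store of element 0 of an e32 vector
previously bounced through a scalar register:

    vmv.x.s a0, v8
    sw      a0, 0(a1)

and can now stay entirely in the vector unit by using a store with VL=1:

    vsetivli zero, 1, e32, m1, ta, mu
    vse32.v  v8, (a1)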

We should support FP too, but FP extracts use extract_element rather than a
custom ISD node, so the handling is a little different. I also left a FIXME
in the test for the i64 extract-and-store case on RV32.

Reviewed By: frasercrmck

Differential Revision: https://reviews.llvm.org/D109482

Added: 
    

Modified: 
    llvm/include/llvm/CodeGen/SelectionDAG.h
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
    llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
index 614d6f5260a8..edc70dda05d8 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -1352,7 +1352,8 @@ class SelectionDAG {
   SDValue getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
                      SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo,
                      Align Alignment, MachineMemOperand::Flags MMOFlags,
-                     const AAMDNodes &AAInfo, bool IsCompressing = false);
+                     const AAMDNodes &AAInfo = AAMDNodes(),
+                     bool IsCompressing = false);
   SDValue getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
                      SDValue Mask, SDValue EVL, MachineMemOperand *MMO,
                      bool IsCompressing = false);

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 9b45ff188f25..27bf689b57cd 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -930,6 +930,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     setTargetDAGCombine(ISD::SRA);
     setTargetDAGCombine(ISD::SRL);
     setTargetDAGCombine(ISD::SHL);
+    setTargetDAGCombine(ISD::STORE);
   }
 }
 
@@ -7116,6 +7117,30 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
       return V;
     return SDValue();
   }
+  case ISD::STORE: {
+    auto *Store = cast<StoreSDNode>(N);
+    SDValue Val = Store->getValue();
+    // Combine store of vmv.x.s to vse with VL of 1.
+    // FIXME: Support FP.
+    if (Val.getOpcode() == RISCVISD::VMV_X_S) {
+      SDValue Src = Val.getOperand(0);
+      EVT VecVT = Src.getValueType();
+      EVT MemVT = Store->getMemoryVT();
+      // The memory VT and the element type must match.
+      if (VecVT.getVectorElementType() == MemVT) {
+        SDLoc DL(N);
+        MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
+        return DAG.getStoreVP(Store->getChain(), DL, Src, Store->getBasePtr(),
+                              DAG.getConstant(1, DL, MaskVT),
+                              DAG.getConstant(1, DL, Subtarget.getXLenVT()),
+                              Store->getPointerInfo(),
+                              Store->getOriginalAlign(),
+                              Store->getMemOperand()->getFlags());
+      }
+    }
+
+    break;
+  }
   }
 
   return SDValue();

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
index 2331a77efcc9..2b2b63668f9f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
@@ -521,3 +521,79 @@ define i64 @extractelt_v3i64_idx(<3 x i64>* %x, i32 signext %idx) nounwind {
   %c = extractelement <3 x i64> %b, i32 %idx
   ret i64 %c
 }
+
+define void @store_extractelt_v16i8(<16 x i8>* %x, i8* %p) nounwind {
+; CHECK-LABEL: store_extractelt_v16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT:    vle8.v v25, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
+; CHECK-NEXT:    vslidedown.vi v25, v25, 7
+; CHECK-NEXT:    vse8.v v25, (a1)
+; CHECK-NEXT:    ret
+  %a = load <16 x i8>, <16 x i8>* %x
+  %b = extractelement <16 x i8> %a, i32 7
+  store i8 %b, i8* %p
+  ret void
+}
+
+define void @store_extractelt_v8i16(<8 x i16>* %x, i16* %p) nounwind {
+; CHECK-LABEL: store_extractelt_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT:    vle16.v v25, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT:    vslidedown.vi v25, v25, 7
+; CHECK-NEXT:    vse16.v v25, (a1)
+; CHECK-NEXT:    ret
+  %a = load <8 x i16>, <8 x i16>* %x
+  %b = extractelement <8 x i16> %a, i32 7
+  store i16 %b, i16* %p
+  ret void
+}
+
+define void @store_extractelt_v4i32(<4 x i32>* %x, i32* %p) nounwind {
+; CHECK-LABEL: store_extractelt_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vle32.v v25, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vslidedown.vi v25, v25, 2
+; CHECK-NEXT:    vse32.v v25, (a1)
+; CHECK-NEXT:    ret
+  %a = load <4 x i32>, <4 x i32>* %x
+  %b = extractelement <4 x i32> %a, i32 2
+  store i32 %b, i32* %p
+  ret void
+}
+
+; FIXME: Use vse64.v on RV32 to avoid two scalar extracts and two scalar stores.
+define void @store_extractelt_v2i64(<2 x i64>* %x, i64* %p) nounwind {
+; RV32-LABEL: store_extractelt_v2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT:    vle64.v v25, (a0)
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vslidedown.vi v25, v25, 1
+; RV32-NEXT:    addi a0, zero, 32
+; RV32-NEXT:    vsrl.vx v26, v25, a0
+; RV32-NEXT:    vmv.x.s a0, v26
+; RV32-NEXT:    vmv.x.s a2, v25
+; RV32-NEXT:    sw a2, 0(a1)
+; RV32-NEXT:    sw a0, 4(a1)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: store_extractelt_v2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; RV64-NEXT:    vle64.v v25, (a0)
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vslidedown.vi v25, v25, 1
+; RV64-NEXT:    vse64.v v25, (a1)
+; RV64-NEXT:    ret
+  %a = load <2 x i64>, <2 x i64>* %x
+  %b = extractelement <2 x i64> %a, i64 1
+  store i64 %b, i64* %p
+  ret void
+}
+

diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
index f24e0eefcc90..dccdbdb844db 100644
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -727,10 +727,8 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV32MV-NEXT:    vmsne.vv v0, v26, v30
 ; RV32MV-NEXT:    vmv.v.i v26, 0
 ; RV32MV-NEXT:    vmerge.vim v26, v26, -1, v0
-; RV32MV-NEXT:    vsetivli zero, 0, e32, m2, ta, mu
-; RV32MV-NEXT:    vmv.x.s a0, v26
-; RV32MV-NEXT:    sw a0, 0(s1)
 ; RV32MV-NEXT:    vsetivli zero, 1, e32, m2, ta, mu
+; RV32MV-NEXT:    vse32.v v26, (s1)
 ; RV32MV-NEXT:    vslidedown.vi v28, v26, 1
 ; RV32MV-NEXT:    vmv.x.s a0, v28
 ; RV32MV-NEXT:    vslidedown.vi v28, v26, 2



