[llvm] 7c4e9a6 - [RISCV] Use 0 for Log2SEW for vle1/vse1 intrinsics to enable vsetvli optimization.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Mon Jun 7 22:45:06 PDT 2021


Author: Craig Topper
Date: 2021-06-07T22:41:14-07:00
New Revision: 7c4e9a68264ffeef6178865be76c45c4fb6390af

URL: https://github.com/llvm/llvm-project/commit/7c4e9a68264ffeef6178865be76c45c4fb6390af
DIFF: https://github.com/llvm/llvm-project/commit/7c4e9a68264ffeef6178865be76c45c4fb6390af.diff

LOG: [RISCV] Use 0 for Log2SEW for vle1/vse1 intrinsics to enable vsetvli optimization.

Missed in D103299.
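
For context on why dropping the special case gives the intended value: the mask intrinsics operate on i1-element scalable vectors, so the generic Log2_32(VT.getScalarSizeInBits()) already evaluates to 0, while the removed "VLE1/VSE1 uses an SEW of 8" override pinned Log2SEW to 3, which per the commit message is what kept the vsetvli optimization from applying to these intrinsics. Below is a standalone sketch of the arithmetic only (not LLVM code; log2_32 here is a hypothetical stand-in for llvm::Log2_32):

#include <cassert>
#include <cstdint>

// Stand-in for llvm::Log2_32: index of the most significant set bit.
static unsigned log2_32(uint32_t Value) {
  unsigned Log = 0;
  while (Value >>= 1)
    ++Log;
  return Log;
}

int main() {
  // <vscale x N x i1> mask types have a 1-bit scalar, so the generic
  // computation already yields Log2SEW == 0 for vle1/vse1 ...
  assert(log2_32(/*ScalarSizeInBits=*/1) == 0);
  // ... whereas the removed special case forced Log2SEW = 3 (SEW = 8),
  // which, per the commit message and the new tests, prevented the
  // vsetvli from the producing vmseq (e.g. e16,mf4) from being reused.
  assert(log2_32(/*ScalarSizeInBits=*/8) == 3);
  return 0;
}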

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
    llvm/test/CodeGen/RISCV/rvv/vse1-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vse1-rv64.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 646e11bae7823..20caccf0b45c9 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -899,9 +899,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
           IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
 
       MVT VT = Node->getSimpleValueType(0);
-      unsigned Log2EEW = Log2_32(VT.getScalarSizeInBits());
-      // VLE1 uses an SEW of 8.
-      unsigned Log2SEW = (IntNo == Intrinsic::riscv_vle1) ? 3 : Log2EEW;
+      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
 
       unsigned CurOp = 2;
       SmallVector<SDValue, 8> Operands;
@@ -913,7 +911,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
 
       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
       const RISCV::VLEPseudo *P =
-          RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2EEW,
+          RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
                               static_cast<unsigned>(LMUL));
       MachineSDNode *Load =
           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
@@ -1090,9 +1088,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
           IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
 
       MVT VT = Node->getOperand(2)->getSimpleValueType(0);
-      unsigned Log2EEW = Log2_32(VT.getScalarSizeInBits());
-      // VSE1 uses an SEW of 8.
-      unsigned Log2SEW = (IntNo == Intrinsic::riscv_vse1) ? 3 : Log2EEW;
+      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
 
       unsigned CurOp = 2;
       SmallVector<SDValue, 8> Operands;
@@ -1103,7 +1099,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
 
       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
       const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
-          IsMasked, IsStrided, Log2EEW, static_cast<unsigned>(LMUL));
+          IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
       MachineSDNode *Store =
           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
       if (auto *MemOp = dyn_cast<MemSDNode>(Node))

diff --git a/llvm/test/CodeGen/RISCV/rvv/vse1-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vse1-rv32.ll
index c4e1cb22fbcbb..c3b4798d0c7a7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vse1-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vse1-rv32.ll
@@ -92,3 +92,46 @@ entry:
   call void @llvm.riscv.vse1.nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1>* %1, i32 %2)
   ret void
 }
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i32);
+
+; Make sure we can use the vsetvli from the producing instruction.
+define void @test_vsetvli_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1>* %2, i32 %3) nounwind {
+; CHECK-LABEL: test_vsetvli_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vse1.v v25, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i32 %3)
+  call void @llvm.riscv.vse1.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1>* %2, i32 %3)
+  ret void
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32);
+
+define void @test_vsetvli_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1>* %2, i32 %3) nounwind {
+; CHECK-LABEL: test_vsetvli_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vse1.v v25, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %3)
+  call void @llvm.riscv.vse1.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1>* %2, i32 %3)
+  ret void
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vse1-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vse1-rv64.ll
index e605cf5bdd9c1..195bc3e1f07ea 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vse1-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vse1-rv64.ll
@@ -92,3 +92,46 @@ entry:
   call void @llvm.riscv.vse1.nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1>* %1, i64 %2)
   ret void
 }
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i64);
+
+; Make sure we can use the vsetvli from the producing instruction.
+define void @test_vsetvli_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1>* %2, i64 %3) nounwind {
+; CHECK-LABEL: test_vsetvli_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vse1.v v25, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i64 %3)
+  call void @llvm.riscv.vse1.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1>* %2, i64 %3)
+  ret void
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i64);
+
+define void @test_vsetvli_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1>* %2, i64 %3) nounwind {
+; CHECK-LABEL: test_vsetvli_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vse1.v v25, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i64 %3)
+  call void @llvm.riscv.vse1.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1>* %2, i64 %3)
+  ret void
+}


        

