[llvm] 9096a52 - [RISCV] Teach vsetvli insertion to not insert redundant vsetvli right after VLEFF/VLSEGFF.

Yeting Kuo via llvm-commits llvm-commits at lists.llvm.org
Tue Jun 14 22:58:46 PDT 2022


Author: Yeting Kuo
Date: 2022-06-15T13:58:40+08:00
New Revision: 9096a52566cb47c513fad449acbaf83c2fb921a0

URL: https://github.com/llvm/llvm-project/commit/9096a52566cb47c513fad449acbaf83c2fb921a0
DIFF: https://github.com/llvm/llvm-project/commit/9096a52566cb47c513fad449acbaf83c2fb921a0.diff

LOG: [RISCV] Teach vsetvli insertion to not insert redundant vsetvli right after VLEFF/VLSEGFF.

The VSETVLIInfo right after a VLEFF/VLSEGFF is currently unknown since these
instructions modify VL. An unknown VSETVLIInfo forces a VSET(I)VLI to be
inserted before the next vector operation. However, the next vector operation
does not actually need a VSET(I)VLI if it uses the same VTYPE and the VL
produced by the VLEFF/VLSEGFF.

Take the C code below as an example:

  vint8m4_t vec_src1 = vle8ff_v_i8m4(str1, &new_vl, vl);
  vbool2_t mask1 = vmseq_vx_i8m4_b2(vec_src1, 0, new_vl);

vsetvli insertion currently adds a redundant vsetvli between the two operations.

Assembly result:
  vsetvli a2,a2,e8,m4,ta,mu
  vle8ff.v v28,(a0)
  csrr a3,vl ; redundant
  vsetvli zero,a3,e8,m4,ta,mu ; redundant
  vmseq.vi v25,v28,0
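
With this patch, the state after the vle8ff is known (its AVL is the vl output
and the VTYPE is unchanged), so the csrr/vsetvli pair can be proven redundant.
The expected assembly would then be roughly as follows; this is an illustrative
sketch, and the new @vleNff2 test in the diff below shows the same effect:

  vsetvli a2,a2,e8,m4,ta,mu
  vle8ff.v v28,(a0)
  vmseq.vi v25,v28,0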

After D126794, VLEFF/VLSEGFF instructions have an output operand that carries
the resulting VL. This patch pretends there is a ghost vsetvli right after each
VLEFF/VLSEGFF. The ghost VSET(I)VLI uses the vl output of the VLEFF/VLSEGFF as
its AVL and the same VTYPE as the VLEFF/VLSEGFF. Such a ghost vsetvli is always
redundant, so we can use it to compute the VSETVLIInfo right after the
VLEFF/VLSEGFF.
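
Conceptually, the transfer function now behaves as if a ghost vsetvli followed
the fault-only-first load; it is never actually emitted, and the register names
below are only illustrative:

  vle8ff.v v28,(a0)                    ; writes vl (a3 denotes its vl output)
  ; ghost: vsetvli zero,a3,e8,m4,ta,mu ; AVL = vl output, VTYPE unchanged
  vmseq.vi v25,v28,0                   ; state already matches, no vsetvli needed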

Reviewed By: reames

Differential Revision: https://reviews.llvm.org/D127576

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index c86dfa080b70c..9ae84f4bf7aed 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -969,6 +969,12 @@ void RISCVInsertVSETVLI::transferAfter(VSETVLIInfo &Info, const MachineInstr &MI
     return;
   }
 
+  if (RISCV::isFaultFirstLoad(MI)) {
+    // Update AVL to vl-output of the fault first load.
+    Info.setAVLReg(MI.getOperand(1).getReg());
+    return;
+  }
+
   // If this is something that updates VL/VTYPE that we don't know about, set
   // the state to unknown.
   if (MI.isCall() || MI.isInlineAsm() || MI.modifiesRegister(RISCV::VL) ||
@@ -1259,11 +1265,7 @@ void RISCVInsertVSETVLI::doLocalPrepass(MachineBasicBlock &MBB) {
       continue;
     }
 
-    // If this is something that updates VL/VTYPE that we don't know about,
-    // set the state to unknown.
-    if (MI.isCall() || MI.isInlineAsm() || MI.modifiesRegister(RISCV::VL) ||
-        MI.modifiesRegister(RISCV::VTYPE))
-      CurInfo = VSETVLIInfo::getUnknown();
+    transferAfter(CurInfo, MI);
   }
 }
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
index 3fff538b57171..acb1013888731 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -492,14 +492,13 @@ entry:
 }
 
 ; Fault first loads can modify VL.
-; TODO: The first and third VSETVLIs are redundant here.
+; TODO: The VSETVLI of vadd could be removed here.
 define <vscale x 1 x i64> @vleNff(i64* %str, i64 %n, i64 %x) {
 ; CHECK-LABEL: vleNff:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a2
 ; CHECK-NEXT:    ret
 entry:
@@ -512,6 +511,25 @@ entry:
   ret <vscale x 1 x i64> %5
 }
 
+; Similar test case, but use same policy for vleff and vadd.
+; Note: The test may be redundant if we could fix the TODO of @vleNff.
+define <vscale x 1 x i64> @vleNff2(i64* %str, i64 %n, i64 %x) {
+; CHECK-LABEL: vleNff2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT:    vle64ff.v v8, (a0)
+; CHECK-NEXT:    vadd.vx v8, v8, a2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call i64 @llvm.riscv.vsetvli.i64(i64 %n, i64 0, i64 2)
+  %1 = bitcast i64* %str to <vscale x 1 x i64>*
+  %2 = tail call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64>* %1, i64 %0)
+  %3 = extractvalue { <vscale x 1 x i64>, i64 } %2, 0
+  %4 = extractvalue { <vscale x 1 x i64>, i64 } %2, 1
+  %5 = tail call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %3, i64 %x, i64 %4)
+  ret <vscale x 1 x i64> %5
+}
+
 declare { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64.i64(
   <vscale x 1 x i64>, <vscale x 1 x i64>* nocapture, i64)
 



