[PATCH] D127576: [RISCV] Teach vsetvli insertion to not insert redundant vsetvli right after VLEFF/VLSEGFF.

Yeting Kuo via Phabricator via llvm-commits llvm-commits at lists.llvm.org
Tue Jun 14 22:58:53 PDT 2022


This revision was landed with ongoing or failed builds.
This revision was automatically updated to reflect the committed changes.
Closed by commit rG9096a52566cb: [RISCV] Teach vsetvli insertion to not insert redundant vsetvli right after… (authored by fakepaper56).

Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D127576/new/

https://reviews.llvm.org/D127576

Files:
  llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
  llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll


Index: llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -492,14 +492,13 @@
 }
 
 ; Fault first loads can modify VL.
-; TODO: The first and third VSETVLIs are redundant here.
+; TODO: The VSETVLI before the vadd could be removed here.
 define <vscale x 1 x i64> @vleNff(i64* %str, i64 %n, i64 %x) {
 ; CHECK-LABEL: vleNff:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a2
 ; CHECK-NEXT:    ret
 entry:
@@ -512,6 +511,25 @@
   ret <vscale x 1 x i64> %5
 }
 
+; Similar test case, but uses the same policy for the vleff and the vadd.
+; Note: This test may become redundant once the TODO in @vleNff is fixed.
+define <vscale x 1 x i64> @vleNff2(i64* %str, i64 %n, i64 %x) {
+; CHECK-LABEL: vleNff2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT:    vle64ff.v v8, (a0)
+; CHECK-NEXT:    vadd.vx v8, v8, a2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call i64 @llvm.riscv.vsetvli.i64(i64 %n, i64 0, i64 2)
+  %1 = bitcast i64* %str to <vscale x 1 x i64>*
+  %2 = tail call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64>* %1, i64 %0)
+  %3 = extractvalue { <vscale x 1 x i64>, i64 } %2, 0
+  %4 = extractvalue { <vscale x 1 x i64>, i64 } %2, 1
+  %5 = tail call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %3, i64 %x, i64 %4)
+  ret <vscale x 1 x i64> %5
+}
+
 declare { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64.i64(
   <vscale x 1 x i64>, <vscale x 1 x i64>* nocapture, i64)
 
Index: llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -969,6 +969,12 @@
     return;
   }
 
+  if (RISCV::isFaultFirstLoad(MI)) {
+    // Update the AVL to the VL output of the fault-first load.
+    Info.setAVLReg(MI.getOperand(1).getReg());
+    return;
+  }
+
   // If this is something that updates VL/VTYPE that we don't know about, set
   // the state to unknown.
   if (MI.isCall() || MI.isInlineAsm() || MI.modifiesRegister(RISCV::VL) ||
@@ -1259,11 +1265,7 @@
       continue;
     }
 
-    // If this is something that updates VL/VTYPE that we don't know about,
-    // set the state to unknown.
-    if (MI.isCall() || MI.isInlineAsm() || MI.modifiesRegister(RISCV::VL) ||
-        MI.modifiesRegister(RISCV::VTYPE))
-      CurInfo = VSETVLIInfo::getUnknown();
+    transferAfter(CurInfo, MI);
   }
 }
 


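For readers skimming the archive, here is a small self-contained sketch (plain C++, built independently of LLVM; every name in it such as AbstractState, Inst, and FaultFirstLoad is invented for illustration and is not the pass's real API) of the idea behind the RISCVInsertVSETVLI.cpp change above: after a fault-first load, the dataflow state keeps tracking the load's VL output as the known AVL instead of falling into the "unknown VL/VTYPE" case, so a later vector op whose AVL is exactly that VL output can be proven compatible and no csrr vl + vsetvli pair has to be inserted for it.

#include <cstdio>
#include <optional>

// Toy model of the vsetvli-insertion dataflow state; for illustration only.
// In the real pass this role is played by VSETVLIInfo.
struct AbstractState {
  std::optional<int> AVLReg;  // register known to hold the current AVL
  bool Unknown = false;       // VL/VTYPE clobbered by something untrackable
};

enum class InstKind { FaultFirstLoad, OtherVLClobber, VectorOp };

struct Inst {
  InstKind Kind;
  int DefReg = -1;  // for FaultFirstLoad: the GPR defined with the produced VL
};

// Post-instruction transfer step (the part this patch changes).
void transferAfter(AbstractState &State, const Inst &MI) {
  if (MI.Kind == InstKind::FaultFirstLoad) {
    // New behaviour: stay tracked, with AVL = the load's VL output.
    State.AVLReg = MI.DefReg;
    State.Unknown = false;
    return;
  }
  if (MI.Kind == InstKind::OtherVLClobber) {
    // Old catch-all: anything else writing VL/VTYPE makes the state unknown.
    State.AVLReg.reset();
    State.Unknown = true;
  }
}

int main() {
  AbstractState S;
  S.AVLReg = 1;  // from an earlier vsetvli whose AVL lives in (toy) register 1
  transferAfter(S, {InstKind::FaultFirstLoad, /*DefReg=*/2});
  // A following vector op whose AVL is exactly register 2 is now compatible
  // with the tracked state, so no csrr+vsetvli needs to be inserted for it.
  std::printf("unknown=%d avl=reg%d\n", S.Unknown, S.AVLReg.value_or(-1));
  return 0;
}

The real transfer function handles more cases than this toy, of course; the second hunk's switch from the open-coded "set to unknown" check to transferAfter(CurInfo, MI) is what lets the backward/forward analyses agree on the fault-first-load behaviour.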