[llvm] [RISCV] Support postRA vsetvl insertion pass (PR #70549)

Luke Lau via llvm-commits llvm-commits at lists.llvm.org
Thu May 2 00:40:55 PDT 2024


================
@@ -567,10 +606,13 @@ class VSETVLIInfo {
   }
 
   bool hasSameAVL(const VSETVLIInfo &Other) const {
-    if (hasAVLReg() && Other.hasAVLReg())
+    if (hasAVLRegDefMI() && Other.hasAVLRegDefMI())
       return getAVLDefMI().isIdenticalTo(Other.getAVLDefMI()) &&
              getAVLReg() == Other.getAVLReg();
 
+    if (hasAVLReg() && Other.hasAVLReg())
+      return getAVLReg() == Other.getAVLReg();
----------------
lukel97 wrote:

I gave this a very rough attempt in my own branch; it seems to recover some of the regressions in vsetvli-insert-crossbb.ll and vsetvli-insert.ll: https://github.com/lukel97/llvm-project/commit/47a6d183fbe81178dac668eda1a6f50d184a0dba

<details><summary>Details</summary>
<p>

```diff
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
index dab1342fd9e4..c1403d240998 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -90,11 +90,12 @@ define <vscale x 1 x double> @test3(i64 %avl, i8 zeroext %cond, <vscale x 1 x do
 ; CHECK-NEXT:  # %bb.1: # %if.then
 ; CHECK-NEXT:    vsetvli a0, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfadd.vv v9, v8, v9
-; CHECK-NEXT:    vfmul.vv v8, v9, v8
-; CHECK-NEXT:    ret
+; CHECK-NEXT:    j .LBB2_3
 ; CHECK-NEXT:  .LBB2_2: # %if.else
 ; CHECK-NEXT:    vsetvli a0, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfsub.vv v9, v8, v9
+; CHECK-NEXT:  .LBB2_3: # %if.end
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v9, v8
 ; CHECK-NEXT:    ret
 entry:
@@ -445,6 +446,7 @@ define void @saxpy_vec(i64 %n, float %a, ptr nocapture readonly %x, ptr nocaptur
 ; CHECK-NEXT:    beqz a3, .LBB8_2
 ; CHECK-NEXT:  .LBB8_1: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a1)
 ; CHECK-NEXT:    vle32.v v16, (a2)
 ; CHECK-NEXT:    slli a4, a3, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
index 6fc3e3917a5c..4c09bf45549f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -102,23 +102,24 @@ declare <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1.i64(<vscale x 1 x i1>, <vscal
 define void @test6(ptr nocapture readonly %A, ptr nocapture %B, i64 %n) {
 ; CHECK-LABEL: test6:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a6, a2, e32, m1, ta, ma
-; CHECK-NEXT:    beqz a6, .LBB5_3
+; CHECK-NEXT:    vsetvli a3, a2, e32, m1, ta, ma
+; CHECK-NEXT:    beqz a3, .LBB5_3
 ; CHECK-NEXT:  # %bb.1: # %for.body.preheader
 ; CHECK-NEXT:    li a4, 0
 ; CHECK-NEXT:  .LBB5_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    slli a3, a4, 2
-; CHECK-NEXT:    add a5, a0, a3
+; CHECK-NEXT:    slli a6, a4, 2
+; CHECK-NEXT:    add a5, a0, a6
+; CHECK-NEXT:    vsetvli zero, a3, e32, m1, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a5)
 ; CHECK-NEXT:    vmsle.vi v9, v8, -3
 ; CHECK-NEXT:    vmsgt.vi v10, v8, 2
 ; CHECK-NEXT:    vmor.mm v0, v9, v10
-; CHECK-NEXT:    add a3, a3, a1
-; CHECK-NEXT:    vse32.v v8, (a3), v0.t
-; CHECK-NEXT:    add a4, a4, a6
-; CHECK-NEXT:    vsetvli a6, a2, e32, m1, ta, ma
-; CHECK-NEXT:    bnez a6, .LBB5_2
+; CHECK-NEXT:    add a6, a6, a1
+; CHECK-NEXT:    vse32.v v8, (a6), v0.t
+; CHECK-NEXT:    add a4, a4, a3
+; CHECK-NEXT:    vsetvli a3, a2, e32, m1, ta, ma
+; CHECK-NEXT:    bnez a3, .LBB5_2
 ; CHECK-NEXT:  .LBB5_3: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:

```

</p>
</details> 
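For context, the quoted hunk boils down to a two-tier AVL comparison: compare the defining instructions when both are known, otherwise fall back to comparing just the AVL registers. Below is a small standalone sketch of that logic; the names `AVLState` and `DefID` are invented for illustration and do not exist in LLVM, where the real comparison lives in `VSETVLIInfo` and uses `MachineInstr::isIdenticalTo` on the AVL-defining instruction.

```cpp
#include <cstdint>
#include <iostream>
#include <optional>

// Simplified stand-in for the AVL portion of VSETVLIInfo (illustrative only).
struct AVLState {
  unsigned Reg = 0;              // register holding the AVL
  std::optional<uint64_t> DefID; // stand-in for the known defining MI, if any

  bool hasAVLReg() const { return Reg != 0; }
  bool hasAVLRegDefMI() const { return hasAVLReg() && DefID.has_value(); }

  bool hasSameAVL(const AVLState &Other) const {
    // Strongest check: both defining instructions are known, so require them
    // (and the registers) to match.
    if (hasAVLRegDefMI() && Other.hasAVLRegDefMI())
      return *DefID == *Other.DefID && Reg == Other.Reg;
    // Relaxed fallback from the quoted hunk: when a defining instruction is
    // not available, two states still compare equal if they use the same
    // AVL register.
    if (hasAVLReg() && Other.hasAVLReg())
      return Reg == Other.Reg;
    return false;
  }
};

int main() {
  AVLState A{/*Reg=*/10, /*DefID=*/std::nullopt};
  AVLState B{/*Reg=*/10, /*DefID=*/std::nullopt};
  // Same AVL register, defs unknown: the fallback treats them as equal, which
  // is what allows the redundant vsetvli to be dropped in the test diffs above.
  std::cout << std::boolalpha << A.hasSameAVL(B) << '\n'; // true
}
```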

https://github.com/llvm/llvm-project/pull/70549

