[llvm] 05a2ae1 - [RISCV][InsertVSETVLI] Using right instruction during mutate AVL of vsetvli

Kito Cheng via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 5 08:44:36 PST 2023


Author: Kito Cheng
Date: 2023-01-06T00:44:30+08:00
New Revision: 05a2ae1b4aadf9537b49c5429f5085c78c669c33

URL: https://github.com/llvm/llvm-project/commit/05a2ae1b4aadf9537b49c5429f5085c78c669c33
DIFF: https://github.com/llvm/llvm-project/commit/05a2ae1b4aadf9537b49c5429f5085c78c669c33.diff

LOG: [RISCV][InsertVSETVLI] Using right instruction during mutate AVL of vsetvli

Fix a crash in the vsetvli insertion pass.

We have a testcase with 3 vsetvli:

1. vsetivli        zero, 2, e8, m4, ta, ma
2. li      a1, 32;  vsetvli zero, a1, e8, m4, ta, mu
3. vsetivli        zero, 2, e8, m4, ta, ma

and then we try to optimize the 2nd vsetvli, since its only user is vmv.x.s, so
it could mutate its AVL operand to the AVL operand of the 3rd vsetvli.
OK, so we propagate 2 to the vsetvli — BUT it's a vsetvli, not a vsetivli, so it
expects a register rather than an immediate value. Therefore we also have to
update the opcode if needed.

Reviewed By: reames

Differential Revision: https://reviews.llvm.org/D141061

Added: 
    llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 361bcc41e48c9..bc6af29ee1de7 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -1316,6 +1316,8 @@ void RISCVInsertVSETVLI::doLocalPostpass(MachineBasicBlock &MBB) {
             MI.getOperand(1).ChangeToImmediate(NextMI->getOperand(1).getImm());
           else
             MI.getOperand(1).ChangeToRegister(NextMI->getOperand(1).getReg(), false);
+          if (MI.getOpcode() != NextMI->getOpcode())
+            MI.setDesc(TII->get(NextMI->getOpcode()));
         }
         MI.getOperand(2).setImm(NextMI->getOperand(2).getImm());
         ToDelete.push_back(NextMI);

diff --git a/llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll b/llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll
new file mode 100644
index 0000000000000..d276d725cc7b9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll
@@ -0,0 +1,53 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s
+@__const.test.var_45 = private unnamed_addr constant [2 x i8] c"\D1S", align 1
+@__const.test.var_101 = private unnamed_addr constant [2 x i8] c"\830", align 1
+
+; Function Attrs: nounwind vscale_range(2,1024)
+define dso_local void @test(ptr nocapture noundef %var_99) {
+; CHECK-LABEL: test:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lui a1, %hi(.L__const.test.var_45)
+; CHECK-NEXT:    addi a1, a1, %lo(.L__const.test.var_45)
+; CHECK-NEXT:    vsetivli zero, 2, e8, m4, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a1)
+; CHECK-NEXT:    li a1, 1
+; CHECK-NEXT:    vmul.vx v12, v8, a1
+; CHECK-NEXT:    lui a1, %hi(.L__const.test.var_101)
+; CHECK-NEXT:    addi a1, a1, %lo(.L__const.test.var_101)
+; CHECK-NEXT:    vle8.v v16, (a1)
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vsetivli zero, 2, e8, m4, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v12
+; CHECK-NEXT:    vmsleu.vx v0, v8, a1
+; CHECK-NEXT:    vssra.vv v8, v16, v8
+; CHECK-NEXT:    vmerge.vvm v8, v8, v8, v0
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8.i64(<vscale x 32 x i8> undef, ptr nonnull @__const.test.var_45, i64 2)
+  %1 = tail call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> %0, i8 1, i64 2)
+  %2 = tail call <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8.i64(<vscale x 32 x i8> undef, ptr nonnull @__const.test.var_101, i64 2)
+  %3 = tail call i64 @llvm.riscv.vsetvli.i64(i64 32, i64 0, i64 2)
+  %4 = tail call i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8> %1)
+  %5 = tail call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> %2, <vscale x 32 x i8> %0, i64 2)
+  %6 = tail call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8.i64(<vscale x 32 x i8> %0, i8 %4, i64 2)
+  %7 = tail call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> %5, <vscale x 32 x i8> %5, <vscale x 32 x i1> %6, i64 2)
+  tail call void @llvm.riscv.vse.nxv32i8.i64(<vscale x 32 x i8> %7, ptr %var_99, i64 2)
+  ret void
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8.i64(<vscale x 32 x i8>, ptr nocapture, i64) #1
+declare <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, i8, i64) #2
+declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) #3
+declare i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8>) #2
+declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, i64) #3
+declare <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8.i64(<vscale x 32 x i8>, i8, i64) #2
+declare <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i64) #2
+declare void @llvm.riscv.vse.nxv32i8.i64(<vscale x 32 x i8>, ptr nocapture, i64) #4
+
+attributes #1 = { nofree nounwind memory(read) }
+attributes #2 = { nofree nosync nounwind memory(none) }
+attributes #3 = { nounwind }
+attributes #4 = { nounwind memory(write) }


        


More information about the llvm-commits mailing list