[llvm] [RISCV] Add implicit operand {VL, VTYPE} in RISCVInsertVSETVLI when u… (PR #130733)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Mar 11 01:36:06 PDT 2025
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-backend-risc-v
Author: Hank Chang (HankChang736)
<details>
<summary>Changes</summary>
…sing inline assembly.
Fixing [#<!-- -->128636](https://github.com/llvm/llvm-project/pull/128636).
This patch makes RISCVInsertVSETVLI add implicit use operands to inline assembly; this approach was suggested by @<!-- -->preames, and the implementation I referenced is from @<!-- -->topperc. The purpose of adding the vl and vtype implicit operands is to prevent the post-RA scheduler from moving a vsetvli across inline assembly.
---
Full diff: https://github.com/llvm/llvm-project/pull/130733.diff
2 Files Affected:
- (modified) llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp (+7)
- (added) llvm/test/CodeGen/RISCV/rvv/vsetvl-cross-inline-asm.ll (+37)
``````````diff
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 7433603daff85..2247610c21ffb 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -1531,6 +1531,13 @@ void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) {
/*isImp*/ true));
}
+ if (MI.isInlineAsm()) {
+ MI.addOperand(MachineOperand::CreateReg(RISCV::VL, /*isDef*/ true,
+ /*isImp*/ true));
+ MI.addOperand(MachineOperand::CreateReg(RISCV::VTYPE, /*isDef*/ true,
+ /*isImp*/ true));
+ }
+
if (MI.isCall() || MI.isInlineAsm() ||
MI.modifiesRegister(RISCV::VL, /*TRI=*/nullptr) ||
MI.modifiesRegister(RISCV::VTYPE, /*TRI=*/nullptr))
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvl-cross-inline-asm.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvl-cross-inline-asm.ll
new file mode 100644
index 0000000000000..b955ee9efc495
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvl-cross-inline-asm.ll
@@ -0,0 +1,37 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv64 -mcpu=sifive-x280 -verify-machineinstrs < %s | FileCheck %s
+
+define void @foo(<vscale x 8 x half> %0, <vscale x 8 x half> %1) {
+; CHECK-LABEL: foo:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: lui a0, 1
+; CHECK-NEXT: addiw a0, a0, -1096
+; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vfmadd.vv v16, v12, v12
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vfmadd.vv v16, v12, v12
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vse16.v v8, (zero)
+; CHECK-NEXT: ret
+entry:
+ %2 = tail call i64 @llvm.riscv.vsetvli.i64(i64 3000, i64 0, i64 0)
+ %3 = tail call <vscale x 8 x float> asm sideeffect "vfmadd.vv $0, $1, $2", "=^vr,^vr,^vr,0"(<vscale x 8 x float> zeroinitializer, <vscale x 8 x float> zeroinitializer, <vscale x 8 x float> zeroinitializer)
+ %4 = tail call <vscale x 8 x float> asm sideeffect "vfmadd.vv $0, $1, $2", "=^vr,^vr,^vr,0"(<vscale x 8 x float> zeroinitializer, <vscale x 8 x float> zeroinitializer, <vscale x 8 x float> %3)
+ tail call void @llvm.riscv.vse.nxv8f16.i64(<vscale x 8 x half> %0, ptr null, i64 %2)
+ ret void
+}
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn memory(none)
+declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) #0
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: write)
+declare void @llvm.riscv.vse.nxv8f16.i64(<vscale x 8 x half>, ptr nocapture, i64) #1
+
+attributes #0 = { nocallback nofree nosync nounwind willreturn memory(none) }
+attributes #1 = { nocallback nofree nosync nounwind willreturn memory(argmem: write) }
``````````
</details>
https://github.com/llvm/llvm-project/pull/130733
More information about the llvm-commits
mailing list