[llvm] [RISCV][GISel] Support select vsetvli intrinsics (PR #174076)
Jianjian Guan via llvm-commits
llvm-commits at lists.llvm.org
Wed Dec 31 01:18:01 PST 2025
https://github.com/jacquesguan created https://github.com/llvm/llvm-project/pull/174076
None
>From e5f5edfac42ea042b1be450d2191d8b90cd69895 Mon Sep 17 00:00:00 2001
From: Jianjian GUAN <jacquesguan at me.com>
Date: Tue, 30 Dec 2025 17:49:00 +0800
Subject: [PATCH] [RISCV][GISel] Support select vsetvli intrinsics
---
.../RISCV/GISel/RISCVInstructionSelector.cpp | 76 +++++++++
.../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 2 +
.../RISCV/GISel/RISCVRegisterBankInfo.cpp | 11 ++
.../GlobalISel/rvv/vsetvli-intrinsics.ll | 153 ++++++++++++++++++
4 files changed, 242 insertions(+)
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsetvli-intrinsics.ll
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 4f2e633c1c524..bdd79e7e6a0db 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -99,6 +99,7 @@ class RISCVInstructionSelector : public InstructionSelector {
LLT *IndexVT = nullptr) const;
bool selectIntrinsicWithSideEffects(MachineInstr &I,
MachineIRBuilder &MIB) const;
+ bool selectIntrinsic(MachineInstr &I, MachineIRBuilder &MIB) const;
bool selectExtractSubvector(MachineInstr &MI, MachineIRBuilder &MIB) const;
ComplexRendererFns selectShiftMask(MachineOperand &Root,
@@ -968,6 +969,79 @@ bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
}
}
+bool RISCVInstructionSelector::selectIntrinsic(MachineInstr &I,
+ MachineIRBuilder &MIB) const {
+ // Find the intrinsic ID.
+ unsigned IntrinID = cast<GIntrinsic>(I).getIntrinsicID();
+ // Select the instruction.
+ switch (IntrinID) {
+ default:
+ return false;
+ case Intrinsic::riscv_vsetvli:
+ case Intrinsic::riscv_vsetvlimax: {
+
+ bool VLMax = IntrinID == Intrinsic::riscv_vsetvlimax;
+
+ unsigned Offset = VLMax ? 2 : 3;
+ unsigned SEW = RISCVVType::decodeVSEW(I.getOperand(Offset).getImm() & 0x7);
+ RISCVVType::VLMUL VLMul =
+ static_cast<RISCVVType::VLMUL>(I.getOperand(Offset + 1).getImm() & 0x7);
+
+ unsigned VTypeI = RISCVVType::encodeVTYPE(VLMul, SEW, /*TailAgnostic=*/true,
+ /*MaskAgnostic=*/true);
+
+ Register DstReg = I.getOperand(0).getReg();
+
+ Register VLOperand;
+ unsigned Opcode = RISCV::PseudoVSETVLI;
+
+ // Check if AVL is a constant equal to VLMAX, or the all-ones VLMAX sentinel.
+ if (!VLMax) {
+ Register AVLReg = I.getOperand(2).getReg();
+ if (auto AVLConst = getIConstantVRegValWithLookThrough(AVLReg, *MRI)) {
+ uint64_t AVL = AVLConst->Value.getZExtValue();
+ if (auto VLEN = Subtarget->getRealVLen()) {
+ if (*VLEN / RISCVVType::getSEWLMULRatio(SEW, VLMul) == AVL)
+ VLMax = true;
+ }
+ }
+
+ MachineInstr *AVLDef = MRI->getVRegDef(AVLReg);
+ if (AVLDef && AVLDef->getOpcode() == TargetOpcode::G_CONSTANT) {
+ const auto *C = AVLDef->getOperand(1).getCImm();
+ if (C->getValue().isAllOnes())
+ VLMax = true;
+ }
+ }
+
+ if (VLMax) {
+ VLOperand = Register(RISCV::X0);
+ Opcode = RISCV::PseudoVSETVLIX0;
+ } else {
+ Register AVLReg = I.getOperand(2).getReg();
+ VLOperand = AVLReg;
+
+ // Check if AVL is a small constant that can use PseudoVSETIVLI.
+ if (auto AVLConst = getIConstantVRegValWithLookThrough(AVLReg, *MRI)) {
+ uint64_t AVL = AVLConst->Value.getZExtValue();
+ if (isUInt<5>(AVL)) {
+ auto PseudoMI = MIB.buildInstr(RISCV::PseudoVSETIVLI, {DstReg}, {})
+ .addImm(AVL)
+ .addImm(VTypeI);
+ I.eraseFromParent();
+ return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
+ }
+ }
+ }
+
+ auto PseudoMI =
+ MIB.buildInstr(Opcode, {DstReg}, {VLOperand}).addImm(VTypeI);
+ I.eraseFromParent();
+ return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
+ }
+ }
+}
+
bool RISCVInstructionSelector::selectExtractSubvector(
MachineInstr &MI, MachineIRBuilder &MIB) const {
assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_SUBVECTOR);
@@ -1279,6 +1353,8 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
}
case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
return selectIntrinsicWithSideEffects(MI, MIB);
+ case TargetOpcode::G_INTRINSIC:
+ return selectIntrinsic(MI, MIB);
case TargetOpcode::G_EXTRACT_SUBVECTOR:
return selectExtractSubvector(MI, MIB);
default:
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 7059d249572e4..ae43fccf8e818 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -800,6 +800,8 @@ bool RISCVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
MI.eraseFromParent();
return true;
}
+ case Intrinsic::riscv_vsetvli:
+ case Intrinsic::riscv_vsetvlimax:
case Intrinsic::riscv_masked_atomicrmw_add:
case Intrinsic::riscv_masked_atomicrmw_sub:
case Intrinsic::riscv_masked_cmpxchg:
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index 28764f7e2fd7b..1bb197cb44b5d 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -18,6 +18,7 @@
#include "llvm/CodeGen/RegisterBank.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/IR/IntrinsicsRISCV.h"
#define GET_TARGET_REGBANK_IMPL
#include "RISCVGenRegisterBank.inc"
@@ -546,6 +547,16 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
}
}
}
+
+ if (IntrinsicID == Intrinsic::riscv_vsetvli ||
+ IntrinsicID == Intrinsic::riscv_vsetvlimax) {
+ for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
+ const MachineOperand &MO = MI.getOperand(Idx);
+ if (!MO.isReg())
+ continue;
+ OpdsMapping[Idx] = GPRValueMapping;
+ }
+ }
break;
}
default:
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsetvli-intrinsics.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsetvli-intrinsics.ll
new file mode 100644
index 0000000000000..1081f1d5eb6f5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsetvli-intrinsics.ll
@@ -0,0 +1,153 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -global-isel \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,VLENUNKNOWN
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -global-isel \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,VLENUNKNOWN
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -global-isel \
+; RUN: -riscv-v-vector-bits-max=128 -verify-machineinstrs \
+; RUN: | FileCheck %s --check-prefixes=CHECK,VLEN128
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -global-isel \
+; RUN: -riscv-v-vector-bits-max=128 -verify-machineinstrs \
+; RUN: | FileCheck %s --check-prefixes=CHECK,VLEN128
+
+define iXLen @test_vsetvli_e8m1(iXLen %avl) nounwind {
+; CHECK-LABEL: test_vsetvli_e8m1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8, m1, ta, ma
+; CHECK-NEXT: ret
+ %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 0, iXLen 0)
+ ret iXLen %vl
+}
+
+define iXLen @test_vsetvli_e16mf4(iXLen %avl) nounwind {
+; CHECK-LABEL: test_vsetvli_e16mf4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16, mf4, ta, ma
+; CHECK-NEXT: ret
+ %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 1, iXLen 6)
+ ret iXLen %vl
+}
+
+define iXLen @test_vsetvli_e64mf8(iXLen %avl) nounwind {
+; CHECK-LABEL: test_vsetvli_e64mf8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64, mf8, ta, ma
+; CHECK-NEXT: ret
+ %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 3, iXLen 5)
+ ret iXLen %vl
+}
+
+define iXLen @test_vsetvli_e8mf2_zero_avl() nounwind {
+; CHECK-LABEL: test_vsetvli_e8mf2_zero_avl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli a0, 0, e8, mf2, ta, ma
+; CHECK-NEXT: ret
+ %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen 0, iXLen 0, iXLen 7)
+ ret iXLen %vl
+}
+
+define iXLen @test_vsetvli_e32mf8_zero_avl() nounwind {
+; CHECK-LABEL: test_vsetvli_e32mf8_zero_avl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli a0, 0, e16, mf4, ta, ma
+; CHECK-NEXT: ret
+ %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen 0, iXLen 1, iXLen 6)
+ ret iXLen %vl
+}
+
+define iXLen @test_vsetvlimax_e32m2() nounwind {
+; CHECK-LABEL: test_vsetvlimax_e32m2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: ret
+ %vl = call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 2, iXLen 1)
+ ret iXLen %vl
+}
+
+define iXLen @test_vsetvlimax_e64m4() nounwind {
+; CHECK-LABEL: test_vsetvlimax_e64m4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT: ret
+ %vl = call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 3, iXLen 2)
+ ret iXLen %vl
+}
+
+define iXLen @test_vsetvlimax_e64m8() nounwind {
+; CHECK-LABEL: test_vsetvlimax_e64m8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT: ret
+ %vl = call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 3, iXLen 3)
+ ret iXLen %vl
+}
+
+; Check that we remove the intrinsic if it's unused.
+define void @test_vsetvli_e8m1_nouse(iXLen %avl) nounwind {
+; CHECK-LABEL: test_vsetvli_e8m1_nouse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ret
+ call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 0, iXLen 0)
+ ret void
+}
+
+define void @test_vsetvlimax_e32m2_nouse() nounwind {
+; CHECK-LABEL: test_vsetvlimax_e32m2_nouse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ret
+ call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 2, iXLen 1)
+ ret void
+}
+
+; Check that we remove the redundant vsetvli when followed by another operation
+define <vscale x 4 x i32> @redundant_vsetvli(iXLen %avl, ptr %ptr) nounwind {
+; CHECK-LABEL: redundant_vsetvli:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: ret
+ %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 2, iXLen 1)
+ %x = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.iXLen(<vscale x 4 x i32> poison, ptr %ptr, iXLen %vl)
+ ret <vscale x 4 x i32> %x
+}
+
+; Check that we remove the repeated/redundant vsetvli when followed by another
+; operation
+; FIXME: We don't catch the second vsetvli because it has a use of its output.
+; We could replace it with the output of the first vsetvli.
+define <vscale x 4 x i32> @repeated_vsetvli(iXLen %avl, ptr %ptr) nounwind {
+; CHECK-LABEL: repeated_vsetvli:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32, m2, ta, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: ret
+ %vl0 = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 2, iXLen 1)
+ %vl1 = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %vl0, iXLen 2, iXLen 1)
+ %x = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.iXLen(<vscale x 4 x i32> poison, ptr %ptr, iXLen %vl1)
+ ret <vscale x 4 x i32> %x
+}
+
+define iXLen @test_vsetvli_negone_e8m1(iXLen %avl) nounwind {
+; CHECK-LABEL: test_vsetvli_negone_e8m1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: ret
+ %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen -1, iXLen 0, iXLen 0)
+ ret iXLen %vl
+}
+
+define iXLen @test_vsetvli_eqvlmax_e8m8(iXLen %avl) nounwind {
+; VLENUNKNOWN-LABEL: test_vsetvli_eqvlmax_e8m8:
+; VLENUNKNOWN: # %bb.0:
+; VLENUNKNOWN-NEXT: li a0, 128
+; VLENUNKNOWN-NEXT: vsetvli a0, a0, e8, m8, ta, ma
+; VLENUNKNOWN-NEXT: ret
+;
+; VLEN128-LABEL: test_vsetvli_eqvlmax_e8m8:
+; VLEN128: # %bb.0:
+; VLEN128-NEXT: vsetvli a0, zero, e8, m8, ta, ma
+; VLEN128-NEXT: ret
+ %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen 128, iXLen 0, iXLen 3)
+ ret iXLen %vl
+}
More information about the llvm-commits
mailing list