[llvm] [RISCV][DAG] Teach computeKnownBits to consider SEW/LMUL/AVL for vsetvli. (PR #76158)
Yeting Kuo via llvm-commits
llvm-commits at lists.llvm.org
Thu Dec 21 06:05:44 PST 2023
https://github.com/yetingk created https://github.com/llvm/llvm-project/pull/76158
This patch also adds tests whose masks are too narrow for the AND to be combined away. I think they can help us catch bugs caused by overestimating the known bits.
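For intuition, the new bound can be modeled standalone as below. This is a minimal sketch rather than the LLVM code itself: knownZeroFirstBit and its parameters are illustrative names, and the 65536 default stands in for what Subtarget.getRealMaxVLen() returns when no tighter VLEN bound is known (65536 is the architectural maximum VLEN).

  #include <algorithm>
  #include <bit>
  #include <cassert>
  #include <cstdint>
  #include <optional>

  // VL <= VLMAX = (VLEN / SEW) scaled by LMUL, further clamped by a
  // constant AVL when one is present (riscv_vsetvli only). All bits at
  // or above bit_width(MaxVL) are then known zero.
  unsigned knownZeroFirstBit(unsigned SEW, unsigned LMul, bool Fractional,
                             std::optional<uint64_t> ConstantAVL,
                             unsigned VLenMax = 65536) {
    unsigned MaxVL = VLenMax / SEW;
    MaxVL = Fractional ? MaxVL / LMul : MaxVL * LMul;
    if (ConstantAVL) // VL never exceeds a constant AVL.
      MaxVL = unsigned(std::min<uint64_t>(MaxVL, *ConstantAVL));
    // MaxVL itself is attainable, so its top bit may be set;
    // std::bit_width(MaxVL) equals Log2_32(MaxVL) + 1 for MaxVL > 0.
    return std::bit_width(MaxVL);
  }

  int main() {
    // e8/m8, unknown AVL: MaxVL = 65536/8*8 = 65536, so bits from 17 up
    // are zero. This is the only shape where the old blanket 17-bit
    // assumption was already tight.
    assert(knownZeroFirstBit(8, 8, false, std::nullopt) == 17);
    // e64/m1: MaxVL = 65536/64 = 1024, so an AND with 2047 folds away.
    assert(knownZeroFirstBit(64, 1, false, std::nullopt) == 11);
    // e8/m1 with constant AVL 1: MaxVL = 1, only bit 0 can be set.
    assert(knownZeroFirstBit(8, 1, false, 1) == 1);
  }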
From 0879f773a9eb3707e0ec06dc547241b4448b4e6a Mon Sep 17 00:00:00 2001
From: Yeting Kuo <yeting.kuo at sifive.com>
Date: Thu, 21 Dec 2023 05:18:59 -0800
Subject: [PATCH] [RISCV][DAG] Teach computeKnownBits to consider SEW/LMUL/AVL
 for vsetvli.
This patch also adds tests whose masks are too narrow for the AND to be
combined away. I think they can help us catch bugs caused by overestimating
the known bits.
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 24 +-
llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll | 610 +++++++++++++++++-
llvm/test/CodeGen/RISCV/rvv/vsetvlmax-ext.ll | 626 +++++++++++++++++++
3 files changed, 1251 insertions(+), 9 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/rvv/vsetvlmax-ext.ll
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index de15bea72e4666..3409b1a5f5c405 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -16001,13 +16001,27 @@ void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
// We can't do anything for most intrinsics.
break;
case Intrinsic::riscv_vsetvli:
- case Intrinsic::riscv_vsetvlimax:
- // Assume that VL output is <= 65536.
- // TODO: Take SEW and LMUL into account.
- if (BitWidth > 17)
- Known.Zero.setBitsFrom(17);
+ case Intrinsic::riscv_vsetvlimax: {
+ bool HasAVL = IntNo == Intrinsic::riscv_vsetvli;
+ unsigned VSEW = Op.getConstantOperandVal(HasAVL + 1);
+ RISCVII::VLMUL VLMUL =
+ static_cast<RISCVII::VLMUL>(Op.getConstantOperandVal(HasAVL + 2));
+ unsigned SEW = RISCVVType::decodeVSEW(VSEW);
+ auto [LMul, Fractional] = RISCVVType::decodeVLMUL(VLMUL);
+ unsigned VLenMax = Subtarget.getRealMaxVLen();
+ unsigned MaxVL = VLenMax / SEW;
+ if (Fractional)
+ MaxVL /= LMul;
+ else
+ MaxVL *= LMul;
+ if (HasAVL && isa<ConstantSDNode>(Op.getOperand(1)))
+ MaxVL = std::min(MaxVL, (unsigned)Op.getConstantOperandVal(1));
+ unsigned KnownZeroFirstBit = Log2_32(MaxVL) + 1;
+ if (BitWidth > KnownZeroFirstBit)
+ Known.Zero.setBitsFrom(KnownZeroFirstBit);
break;
}
+ }
break;
}
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll
index 5804f8edf84d21..39d73bed25926b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll
@@ -24,12 +24,614 @@ define zeroext i32 @vsetvl_zext() {
ret i32 %b
}
-define i64 @vsetvl_and17bits() {
-; CHECK-LABEL: vsetvl_and17bits:
+define i64 @vsetvl_e8m1_and14bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e8m1_and14bits:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli a0, 1, e16, m2, ta, ma
+; CHECK-NEXT: vsetvli a0, a0, e8, m1, ta, ma
; CHECK-NEXT: ret
- %a = call i64 @llvm.riscv.vsetvli(i64 1, i64 1, i64 1)
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 0, i64 0)
+ %b = and i64 %a, 16383
+ ret i64 %b
+}
+
+define i64 @vsetvl_e8m1_and13bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e8m1_and13bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8, m1, ta, ma
+; CHECK-NEXT: slli a0, a0, 51
+; CHECK-NEXT: srli a0, a0, 51
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 0, i64 0)
+ %b = and i64 %a, 8191
+ ret i64 %b
+}
+
+define i64 @vsetvl_e8m1_constant_avl() {
+; CHECK-LABEL: vsetvl_e8m1_constant_avl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli a0, 1, e8, m1, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 1, i64 0, i64 0)
+ %b = and i64 %a, 1
+ ret i64 %b
+}
+
+define i64 @vsetvl_e8m2_and15bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e8m2_and15bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8, m2, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 0, i64 1)
+ %b = and i64 %a, 32767
+ ret i64 %b
+}
+
+define i64 @vsetvl_e8m2_and14bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e8m2_and14bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8, m2, ta, ma
+; CHECK-NEXT: slli a0, a0, 50
+; CHECK-NEXT: srli a0, a0, 50
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 0, i64 1)
+ %b = and i64 %a, 16383
+ ret i64 %b
+}
+
+define i64 @vsetvl_e8m4_and16bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e8m4_and16bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8, m4, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 0, i64 2)
+ %b = and i64 %a, 65535
+ ret i64 %b
+}
+
+define i64 @vsetvl_e8m4_and15bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e8m4_and15bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8, m4, ta, ma
+; CHECK-NEXT: slli a0, a0, 49
+; CHECK-NEXT: srli a0, a0, 49
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 0, i64 2)
+ %b = and i64 %a, 32767
+ ret i64 %b
+}
+
+define i64 @vsetvl_e8m8_and17bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e8m8_and17bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8, m8, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 0, i64 3)
%b = and i64 %a, 131071
ret i64 %b
}
+
+define i64 @vsetvl_e8m8_and16bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e8m8_and16bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8, m8, ta, ma
+; CHECK-NEXT: slli a0, a0, 48
+; CHECK-NEXT: srli a0, a0, 48
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 0, i64 3)
+ %b = and i64 %a, 65535
+ ret i64 %b
+}
+
+define i64 @vsetvl_e8mf8_and11bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e8mf8_and11bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8, mf8, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 0, i64 5)
+ %b = and i64 %a, 2047
+ ret i64 %b
+}
+
+define i64 @vsetvl_e8mf8_and10bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e8mf8_and10bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8, mf8, ta, ma
+; CHECK-NEXT: andi a0, a0, 1023
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 0, i64 5)
+ %b = and i64 %a, 1023
+ ret i64 %b
+}
+
+define i64 @vsetvl_e8mf4_and12bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e8mf4_and12bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8, mf4, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 0, i64 6)
+ %b = and i64 %a, 4095
+ ret i64 %b
+}
+
+define i64 @vsetvl_e8mf4_and11bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e8mf4_and11bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8, mf4, ta, ma
+; CHECK-NEXT: andi a0, a0, 2047
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 0, i64 6)
+ %b = and i64 %a, 2047
+ ret i64 %b
+}
+
+define i64 @vsetvl_e8mf2_and13bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e8mf2_and13bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8, mf2, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 0, i64 7)
+ %b = and i64 %a, 8191
+ ret i64 %b
+}
+
+define i64 @vsetvl_e8mf2_and12bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e8mf2_and12bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8, mf2, ta, ma
+; CHECK-NEXT: slli a0, a0, 52
+; CHECK-NEXT: srli a0, a0, 52
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 0, i64 7)
+ %b = and i64 %a, 4095
+ ret i64 %b
+}
+
+define i64 @vsetvl_e16m1_and13bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e16m1_and13bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16, m1, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 1, i64 0)
+ %b = and i64 %a, 8191
+ ret i64 %b
+}
+
+define i64 @vsetvl_e16m1_and12bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e16m1_and12bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16, m1, ta, ma
+; CHECK-NEXT: slli a0, a0, 52
+; CHECK-NEXT: srli a0, a0, 52
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 1, i64 0)
+ %b = and i64 %a, 4095
+ ret i64 %b
+}
+
+define i64 @vsetvl_e16m2_and14bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e16m2_and14bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16, m2, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 1, i64 1)
+ %b = and i64 %a, 16383
+ ret i64 %b
+}
+
+define i64 @vsetvl_e16m2_and13bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e16m2_and13bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16, m2, ta, ma
+; CHECK-NEXT: slli a0, a0, 51
+; CHECK-NEXT: srli a0, a0, 51
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 1, i64 1)
+ %b = and i64 %a, 8191
+ ret i64 %b
+}
+
+define i64 @vsetvl_e16m4_and15bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e16m4_and15bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16, m4, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 1, i64 2)
+ %b = and i64 %a, 32767
+ ret i64 %b
+}
+
+define i64 @vsetvl_e16m4_and14bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e16m4_and14bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16, m4, ta, ma
+; CHECK-NEXT: slli a0, a0, 50
+; CHECK-NEXT: srli a0, a0, 50
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 1, i64 2)
+ %b = and i64 %a, 16383
+ ret i64 %b
+}
+
+define i64 @vsetvl_e16m8_and16bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e16m8_and16bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16, m8, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 1, i64 3)
+ %b = and i64 %a, 65535
+ ret i64 %b
+}
+
+define i64 @vsetvl_e16m8_and15bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e16m8_and15bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16, m8, ta, ma
+; CHECK-NEXT: slli a0, a0, 49
+; CHECK-NEXT: srli a0, a0, 49
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 1, i64 3)
+ %b = and i64 %a, 32767
+ ret i64 %b
+}
+
+define i64 @vsetvl_e16mf8_and10bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e16mf8_and10bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16, mf8, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 1, i64 5)
+ %b = and i64 %a, 1023
+ ret i64 %b
+}
+
+define i64 @vsetvl_e16mf8_and9bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e16mf8_and9bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16, mf8, ta, ma
+; CHECK-NEXT: andi a0, a0, 511
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 1, i64 5)
+ %b = and i64 %a, 511
+ ret i64 %b
+}
+
+define i64 @vsetvl_e16mf4_and11bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e16mf4_and11bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16, mf4, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 1, i64 6)
+ %b = and i64 %a, 2047
+ ret i64 %b
+}
+
+define i64 @vsetvl_e16mf4_and10bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e16mf4_and10bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16, mf4, ta, ma
+; CHECK-NEXT: andi a0, a0, 1023
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 1, i64 6)
+ %b = and i64 %a, 1023
+ ret i64 %b
+}
+
+define i64 @vsetvl_e16mf2_and12bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e16mf2_and12bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16, mf2, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 1, i64 7)
+ %b = and i64 %a, 4095
+ ret i64 %b
+}
+
+define i64 @vsetvl_e16mf2_and11bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e16mf2_and11bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16, mf2, ta, ma
+; CHECK-NEXT: andi a0, a0, 2047
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 1, i64 7)
+ %b = and i64 %a, 2047
+ ret i64 %b
+}
+
+define i64 @vsetvl_e32m1_and12bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e32m1_and12bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32, m1, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 0)
+ %b = and i64 %a, 4095
+ ret i64 %b
+}
+
+define i64 @vsetvl_e32m1_and11bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e32m1_and11bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32, m1, ta, ma
+; CHECK-NEXT: andi a0, a0, 2047
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 0)
+ %b = and i64 %a, 2047
+ ret i64 %b
+}
+
+define i64 @vsetvl_e32m2_and13bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e32m2_and13bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32, m2, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 1)
+ %b = and i64 %a, 8191
+ ret i64 %b
+}
+
+define i64 @vsetvl_e32m2_and12bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e32m2_and12bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32, m2, ta, ma
+; CHECK-NEXT: slli a0, a0, 52
+; CHECK-NEXT: srli a0, a0, 52
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 1)
+ %b = and i64 %a, 4095
+ ret i64 %b
+}
+
+define i64 @vsetvl_e32m4_and14bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e32m4_and14bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32, m4, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 2)
+ %b = and i64 %a, 16383
+ ret i64 %b
+}
+
+define i64 @vsetvl_e32m4_and13bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e32m4_and13bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32, m4, ta, ma
+; CHECK-NEXT: slli a0, a0, 51
+; CHECK-NEXT: srli a0, a0, 51
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 2)
+ %b = and i64 %a, 8191
+ ret i64 %b
+}
+
+define i64 @vsetvl_e32m8_and15bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e32m8_and15bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32, m8, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 3)
+ %b = and i64 %a, 32767
+ ret i64 %b
+}
+
+define i64 @vsetvl_e32m8_and14bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e32m8_and14bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32, m8, ta, ma
+; CHECK-NEXT: slli a0, a0, 50
+; CHECK-NEXT: srli a0, a0, 50
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 3)
+ %b = and i64 %a, 16383
+ ret i64 %b
+}
+
+define i64 @vsetvl_e32mf8_and9bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e32mf8_and9bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32, mf8, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 5)
+ %b = and i64 %a, 511
+ ret i64 %b
+}
+
+define i64 @vsetvl_e32mf8_and8bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e32mf8_and8bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32, mf8, ta, ma
+; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 5)
+ %b = and i64 %a, 255
+ ret i64 %b
+}
+
+define i64 @vsetvl_e32mf4_and10bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e32mf4_and10bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32, mf4, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 6)
+ %b = and i64 %a, 1023
+ ret i64 %b
+}
+
+define i64 @vsetvl_e32mf4_and9bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e32mf4_and9bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32, mf4, ta, ma
+; CHECK-NEXT: andi a0, a0, 511
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 6)
+ %b = and i64 %a, 511
+ ret i64 %b
+}
+
+define i64 @vsetvl_e32mf2_and11bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e32mf2_and11bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32, mf2, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 7)
+ %b = and i64 %a, 2047
+ ret i64 %b
+}
+
+define i64 @vsetvl_e32mf2_and10bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e32mf2_and10bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32, mf2, ta, ma
+; CHECK-NEXT: andi a0, a0, 1023
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 7)
+ %b = and i64 %a, 1023
+ ret i64 %b
+}
+
+define i64 @vsetvl_e64m1_and11bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e64m1_and11bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64, m1, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 0)
+ %b = and i64 %a, 2047
+ ret i64 %b
+}
+
+define i64 @vsetvl_e64m1_and10bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e64m1_and10bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64, m1, ta, ma
+; CHECK-NEXT: andi a0, a0, 1023
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 0)
+ %b = and i64 %a, 1023
+ ret i64 %b
+}
+
+define i64 @vsetvl_e64m2_and12bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e64m2_and12bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64, m2, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 1)
+ %b = and i64 %a, 4095
+ ret i64 %b
+}
+
+define i64 @vsetvl_e64m2_and11bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e64m2_and11bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64, m2, ta, ma
+; CHECK-NEXT: andi a0, a0, 2047
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 1)
+ %b = and i64 %a, 2047
+ ret i64 %b
+}
+
+define i64 @vsetvl_e64m4_and13bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e64m4_and13bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64, m4, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 2)
+ %b = and i64 %a, 8191
+ ret i64 %b
+}
+
+define i64 @vsetvl_e64m4_and12bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e64m4_and12bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64, m4, ta, ma
+; CHECK-NEXT: slli a0, a0, 52
+; CHECK-NEXT: srli a0, a0, 52
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 2)
+ %b = and i64 %a, 4095
+ ret i64 %b
+}
+
+define i64 @vsetvl_e64m8_and14bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e64m8_and14bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64, m8, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 3)
+ %b = and i64 %a, 16383
+ ret i64 %b
+}
+
+define i64 @vsetvl_e64m8_and13bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e64m8_and13bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64, m8, ta, ma
+; CHECK-NEXT: slli a0, a0, 51
+; CHECK-NEXT: srli a0, a0, 51
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 3)
+ %b = and i64 %a, 8191
+ ret i64 %b
+}
+
+define i64 @vsetvl_e64mf8_and8bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e64mf8_and8bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64, mf8, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 5)
+ %b = and i64 %a, 255
+ ret i64 %b
+}
+
+define i64 @vsetvl_e64mf8_and7bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e64mf8_and7bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64, mf8, ta, ma
+; CHECK-NEXT: andi a0, a0, 127
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 5)
+ %b = and i64 %a, 127
+ ret i64 %b
+}
+
+define i64 @vsetvl_e64mf4_and9bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e64mf4_and9bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64, mf4, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 6)
+ %b = and i64 %a, 511
+ ret i64 %b
+}
+
+define i64 @vsetvl_e64mf4_and8bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e64mf4_and8bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64, mf4, ta, ma
+; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 6)
+ %b = and i64 %a, 255
+ ret i64 %b
+}
+
+define i64 @vsetvl_e64mf2_and10bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e64mf2_and10bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64, mf2, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 7)
+ %b = and i64 %a, 1023
+ ret i64 %b
+}
+
+define i64 @vsetvl_e64mf2_and9bits(i64 %avl) {
+; CHECK-LABEL: vsetvl_e64mf2_and9bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64, mf2, ta, ma
+; CHECK-NEXT: andi a0, a0, 511
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 7)
+ %b = and i64 %a, 511
+ ret i64 %b
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvlmax-ext.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvlmax-ext.ll
new file mode 100644
index 00000000000000..b2a676dc0daf4d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvlmax-ext.ll
@@ -0,0 +1,626 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s
+
+declare i64 @llvm.riscv.vsetvlimax(i64, i64)
+
+define signext i32 @vsetvlmax_sext() {
+; CHECK-LABEL: vsetvlmax_sext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 1, i64 1)
+ %b = trunc i64 %a to i32
+ ret i32 %b
+}
+
+define zeroext i32 @vsetvlmax_zext() {
+; CHECK-LABEL: vsetvlmax_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 1, i64 1)
+ %b = trunc i64 %a to i32
+ ret i32 %b
+}
+
+define i64 @vsetvlmax_e8m1_and14bits() {
+; CHECK-LABEL: vsetvlmax_e8m1_and14bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 0, i64 0)
+ %b = and i64 %a, 16383
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e8m1_and13bits() {
+; CHECK-LABEL: vsetvlmax_e8m1_and13bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: slli a0, a0, 51
+; CHECK-NEXT: srli a0, a0, 51
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 0, i64 0)
+ %b = and i64 %a, 8191
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e8m2_and15bits() {
+; CHECK-LABEL: vsetvlmax_e8m2_and15bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 0, i64 1)
+ %b = and i64 %a, 32767
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e8m2_and14bits() {
+; CHECK-LABEL: vsetvlmax_e8m2_and14bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: slli a0, a0, 50
+; CHECK-NEXT: srli a0, a0, 50
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 0, i64 1)
+ %b = and i64 %a, 16383
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e8m4_and16bits() {
+; CHECK-LABEL: vsetvlmax_e8m4_and16bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 0, i64 2)
+ %b = and i64 %a, 65535
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e8m4_and15bits() {
+; CHECK-LABEL: vsetvlmax_e8m4_and15bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-NEXT: slli a0, a0, 49
+; CHECK-NEXT: srli a0, a0, 49
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 0, i64 2)
+ %b = and i64 %a, 32767
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e8m8_and17bits() {
+; CHECK-LABEL: vsetvlmax_e8m8_and17bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 0, i64 3)
+ %b = and i64 %a, 131071
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e8m8_and16bits() {
+; CHECK-LABEL: vsetvlmax_e8m8_and16bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
+; CHECK-NEXT: slli a0, a0, 48
+; CHECK-NEXT: srli a0, a0, 48
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 0, i64 3)
+ %b = and i64 %a, 65535
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e8mf8_and11bits() {
+; CHECK-LABEL: vsetvlmax_e8mf8_and11bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 0, i64 5)
+ %b = and i64 %a, 2047
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e8mf8_and10bits() {
+; CHECK-LABEL: vsetvlmax_e8mf8_and10bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT: andi a0, a0, 1023
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 0, i64 5)
+ %b = and i64 %a, 1023
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e8mf4_and12bits() {
+; CHECK-LABEL: vsetvlmax_e8mf4_and12bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 0, i64 6)
+ %b = and i64 %a, 4095
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e8mf4_and11bits() {
+; CHECK-LABEL: vsetvlmax_e8mf4_and11bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT: andi a0, a0, 2047
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 0, i64 6)
+ %b = and i64 %a, 2047
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e8mf2_and13bits() {
+; CHECK-LABEL: vsetvlmax_e8mf2_and13bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 0, i64 7)
+ %b = and i64 %a, 8191
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e8mf2_and12bits() {
+; CHECK-LABEL: vsetvlmax_e8mf2_and12bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT: slli a0, a0, 52
+; CHECK-NEXT: srli a0, a0, 52
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 0, i64 7)
+ %b = and i64 %a, 4095
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e16m1_and13bits() {
+; CHECK-LABEL: vsetvlmax_e16m1_and13bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 1, i64 0)
+ %b = and i64 %a, 8191
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e16m1_and12bits() {
+; CHECK-LABEL: vsetvlmax_e16m1_and12bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: slli a0, a0, 52
+; CHECK-NEXT: srli a0, a0, 52
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 1, i64 0)
+ %b = and i64 %a, 4095
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e16m2_and14bits() {
+; CHECK-LABEL: vsetvlmax_e16m2_and14bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 1, i64 1)
+ %b = and i64 %a, 16383
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e16m2_and13bits() {
+; CHECK-LABEL: vsetvlmax_e16m2_and13bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: slli a0, a0, 51
+; CHECK-NEXT: srli a0, a0, 51
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 1, i64 1)
+ %b = and i64 %a, 8191
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e16m4_and15bits() {
+; CHECK-LABEL: vsetvlmax_e16m4_and15bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 1, i64 2)
+ %b = and i64 %a, 32767
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e16m4_and14bits() {
+; CHECK-LABEL: vsetvlmax_e16m4_and14bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: slli a0, a0, 50
+; CHECK-NEXT: srli a0, a0, 50
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 1, i64 2)
+ %b = and i64 %a, 16383
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e16m8_and16bits() {
+; CHECK-LABEL: vsetvlmax_e16m8_and16bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 1, i64 3)
+ %b = and i64 %a, 65535
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e16m8_and15bits() {
+; CHECK-LABEL: vsetvlmax_e16m8_and15bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-NEXT: slli a0, a0, 49
+; CHECK-NEXT: srli a0, a0, 49
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 1, i64 3)
+ %b = and i64 %a, 32767
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e16mf8_and10bits() {
+; CHECK-LABEL: vsetvlmax_e16mf8_and10bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf8, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 1, i64 5)
+ %b = and i64 %a, 1023
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e16mf8_and9bits() {
+; CHECK-LABEL: vsetvlmax_e16mf8_and9bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf8, ta, ma
+; CHECK-NEXT: andi a0, a0, 511
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 1, i64 5)
+ %b = and i64 %a, 511
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e16mf4_and11bits() {
+; CHECK-LABEL: vsetvlmax_e16mf4_and11bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 1, i64 6)
+ %b = and i64 %a, 2047
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e16mf4_and10bits() {
+; CHECK-LABEL: vsetvlmax_e16mf4_and10bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: andi a0, a0, 1023
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 1, i64 6)
+ %b = and i64 %a, 1023
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e16mf2_and12bits() {
+; CHECK-LABEL: vsetvlmax_e16mf2_and12bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 1, i64 7)
+ %b = and i64 %a, 4095
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e16mf2_and11bits() {
+; CHECK-LABEL: vsetvlmax_e16mf2_and11bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: andi a0, a0, 2047
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 1, i64 7)
+ %b = and i64 %a, 2047
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e32m1_and12bits() {
+; CHECK-LABEL: vsetvlmax_e32m1_and12bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 2, i64 0)
+ %b = and i64 %a, 4095
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e32m1_and11bits() {
+; CHECK-LABEL: vsetvlmax_e32m1_and11bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: andi a0, a0, 2047
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 2, i64 0)
+ %b = and i64 %a, 2047
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e32m2_and13bits() {
+; CHECK-LABEL: vsetvlmax_e32m2_and13bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 2, i64 1)
+ %b = and i64 %a, 8191
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e32m2_and12bits() {
+; CHECK-LABEL: vsetvlmax_e32m2_and12bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: slli a0, a0, 52
+; CHECK-NEXT: srli a0, a0, 52
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 2, i64 1)
+ %b = and i64 %a, 4095
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e32m4_and14bits() {
+; CHECK-LABEL: vsetvlmax_e32m4_and14bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 2, i64 2)
+ %b = and i64 %a, 16383
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e32m4_and13bits() {
+; CHECK-LABEL: vsetvlmax_e32m4_and13bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: slli a0, a0, 51
+; CHECK-NEXT: srli a0, a0, 51
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 2, i64 2)
+ %b = and i64 %a, 8191
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e32m8_and15bits() {
+; CHECK-LABEL: vsetvlmax_e32m8_and15bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 2, i64 3)
+ %b = and i64 %a, 32767
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e32m8_and14bits() {
+; CHECK-LABEL: vsetvlmax_e32m8_and14bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT: slli a0, a0, 50
+; CHECK-NEXT: srli a0, a0, 50
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 2, i64 3)
+ %b = and i64 %a, 16383
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e32mf8_and9bits() {
+; CHECK-LABEL: vsetvlmax_e32mf8_and9bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf8, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 2, i64 5)
+ %b = and i64 %a, 511
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e32mf8_and8bits() {
+; CHECK-LABEL: vsetvlmax_e32mf8_and8bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf8, ta, ma
+; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 2, i64 5)
+ %b = and i64 %a, 255
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e32mf4_and10bits() {
+; CHECK-LABEL: vsetvlmax_e32mf4_and10bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf4, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 2, i64 6)
+ %b = and i64 %a, 1023
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e32mf4_and9bits() {
+; CHECK-LABEL: vsetvlmax_e32mf4_and9bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf4, ta, ma
+; CHECK-NEXT: andi a0, a0, 511
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 2, i64 6)
+ %b = and i64 %a, 511
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e32mf2_and11bits() {
+; CHECK-LABEL: vsetvlmax_e32mf2_and11bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 2, i64 7)
+ %b = and i64 %a, 2047
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e32mf2_and10bits() {
+; CHECK-LABEL: vsetvlmax_e32mf2_and10bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: andi a0, a0, 1023
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 2, i64 7)
+ %b = and i64 %a, 1023
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e64m1_and11bits() {
+; CHECK-LABEL: vsetvlmax_e64m1_and11bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 3, i64 0)
+ %b = and i64 %a, 2047
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e64m1_and10bits() {
+; CHECK-LABEL: vsetvlmax_e64m1_and10bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: andi a0, a0, 1023
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 3, i64 0)
+ %b = and i64 %a, 1023
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e64m2_and12bits() {
+; CHECK-LABEL: vsetvlmax_e64m2_and12bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 3, i64 1)
+ %b = and i64 %a, 4095
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e64m2_and11bits() {
+; CHECK-LABEL: vsetvlmax_e64m2_and11bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT: andi a0, a0, 2047
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 3, i64 1)
+ %b = and i64 %a, 2047
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e64m4_and13bits() {
+; CHECK-LABEL: vsetvlmax_e64m4_and13bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 3, i64 2)
+ %b = and i64 %a, 8191
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e64m4_and12bits() {
+; CHECK-LABEL: vsetvlmax_e64m4_and12bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT: slli a0, a0, 52
+; CHECK-NEXT: srli a0, a0, 52
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 3, i64 2)
+ %b = and i64 %a, 4095
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e64m8_and14bits() {
+; CHECK-LABEL: vsetvlmax_e64m8_and14bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 3, i64 3)
+ %b = and i64 %a, 16383
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e64m8_and13bits() {
+; CHECK-LABEL: vsetvlmax_e64m8_and13bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT: slli a0, a0, 51
+; CHECK-NEXT: srli a0, a0, 51
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 3, i64 3)
+ %b = and i64 %a, 8191
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e64mf8_and8bits() {
+; CHECK-LABEL: vsetvlmax_e64mf8_and8bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, mf8, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 3, i64 5)
+ %b = and i64 %a, 255
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e64mf8_and7bits() {
+; CHECK-LABEL: vsetvlmax_e64mf8_and7bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, mf8, ta, ma
+; CHECK-NEXT: andi a0, a0, 127
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 3, i64 5)
+ %b = and i64 %a, 127
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e64mf4_and9bits() {
+; CHECK-LABEL: vsetvlmax_e64mf4_and9bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, mf4, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 3, i64 6)
+ %b = and i64 %a, 511
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e64mf4_and8bits() {
+; CHECK-LABEL: vsetvlmax_e64mf4_and8bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, mf4, ta, ma
+; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 3, i64 6)
+ %b = and i64 %a, 255
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e64mf2_and10bits() {
+; CHECK-LABEL: vsetvlmax_e64mf2_and10bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, mf2, ta, ma
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 3, i64 7)
+ %b = and i64 %a, 1023
+ ret i64 %b
+}
+
+define i64 @vsetvlmax_e64mf2_and9bits() {
+; CHECK-LABEL: vsetvlmax_e64mf2_and9bits:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, mf2, ta, ma
+; CHECK-NEXT: andi a0, a0, 511
+; CHECK-NEXT: ret
+ %a = call i64 @llvm.riscv.vsetvlimax(i64 3, i64 7)
+ %b = and i64 %a, 511
+ ret i64 %b
+}