[llvm] 1ce3afd - [ValueTracking] Teach computeKnownBits about riscv.vsetvli.opt and riscv.vsetvlimax.opt intrinsics.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Fri Jan 27 16:40:16 PST 2023
Author: Craig Topper
Date: 2023-01-27T16:26:38-08:00
New Revision: 1ce3afd97784b7e638c6a562451dc008a6bc3907
URL: https://github.com/llvm/llvm-project/commit/1ce3afd97784b7e638c6a562451dc008a6bc3907
DIFF: https://github.com/llvm/llvm-project/commit/1ce3afd97784b7e638c6a562451dc008a6bc3907.diff
LOG: [ValueTracking] Teach computeKnownBits about riscv.vsetvli.opt and riscv.vsetvlimax.opt intrinsics.
These behave like the corresponding intrinsics without the .opt suffix, but have no side effects.
Add missing test cases for riscv.vsetvlimax.
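
To illustrate the effect (a minimal sketch mirroring the new tests below; the function name is illustrative): once computeKnownBits reports that the VL result is positive and fits in an int32_t, masking it with 0x7FFFFFFF is provably a no-op, and the new CHECK lines show InstCombine deleting the 'and':

declare i32 @llvm.riscv.vsetvli.opt.i32(i32, i32, i32)

define i32 @mask_is_noop() nounwind {
entry:
  %vl = call i32 @llvm.riscv.vsetvli.opt.i32(i32 1, i32 1, i32 1)
  ; bit 31 of %vl is now known zero, so this 'and' is a no-op and
  ; InstCombine folds the function to return %vl directly
  %masked = and i32 %vl, 2147483647
  ret i32 %masked
}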
Added:
Modified:
llvm/lib/Analysis/ValueTracking.cpp
llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll
Removed:
################################################################################
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 4f1b282b95f3f..e0af42d4cc308 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -1735,7 +1735,9 @@ static void computeKnownBitsFromOperator(const Operator *I,
         Known.Zero.setBitsFrom(32);
         break;
       case Intrinsic::riscv_vsetvli:
+      case Intrinsic::riscv_vsetvli_opt:
       case Intrinsic::riscv_vsetvlimax:
+      case Intrinsic::riscv_vsetvlimax_opt:
         // Assume that VL output is positive and would fit in an int32_t.
         // TODO: VLEN might be capped at 16 bits in a future V spec update.
         if (BitWidth >= 32)
diff --git a/llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll b/llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll
index 4a81082799141..3e3be9b17596c 100644
--- a/llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll
+++ b/llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll
@@ -3,6 +3,12 @@
 
 declare i32 @llvm.riscv.vsetvli.i32(i32, i32, i32)
 declare i64 @llvm.riscv.vsetvli.i64(i64, i64, i64)
+declare i32 @llvm.riscv.vsetvlimax.i32(i32, i32)
+declare i64 @llvm.riscv.vsetvlimax.i64(i64, i64)
+declare i32 @llvm.riscv.vsetvli.opt.i32(i32, i32, i32)
+declare i64 @llvm.riscv.vsetvli.opt.i64(i64, i64, i64)
+declare i32 @llvm.riscv.vsetvlimax.opt.i32(i32, i32)
+declare i64 @llvm.riscv.vsetvlimax.opt.i64(i64, i64)
 
 define i32 @vsetvli_i32() nounwind {
 ; CHECK-LABEL: @vsetvli_i32(
@@ -41,3 +47,117 @@ entry:
   %2 = zext i32 %1 to i64
   ret i64 %2
 }
+
+define i32 @vsetvlimax_i32() nounwind {
+; CHECK-LABEL: @vsetvlimax_i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.i32(i32 1, i32 1)
+; CHECK-NEXT:    ret i32 [[TMP0]]
+;
+entry:
+  %0 = call i32 @llvm.riscv.vsetvlimax.i32(i32 1, i32 1)
+  %1 = and i32 %0, 2147483647
+  ret i32 %1
+}
+
+define i64 @vsetvlimax_sext_i64() nounwind {
+; CHECK-LABEL: @vsetvlimax_sext_i64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 1, i64 1)
+; CHECK-NEXT:    ret i64 [[TMP0]]
+;
+entry:
+  %0 = call i64 @llvm.riscv.vsetvlimax.i64(i64 1, i64 1)
+  %1 = trunc i64 %0 to i32
+  %2 = sext i32 %1 to i64
+  ret i64 %2
+}
+
+define i64 @vsetvlimax_zext_i64() nounwind {
+; CHECK-LABEL: @vsetvlimax_zext_i64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 1, i64 1)
+; CHECK-NEXT:    ret i64 [[TMP0]]
+;
+entry:
+  %0 = call i64 @llvm.riscv.vsetvlimax.i64(i64 1, i64 1)
+  %1 = trunc i64 %0 to i32
+  %2 = zext i32 %1 to i64
+  ret i64 %2
+}
+
+define i32 @vsetvli_opt_i32() nounwind {
+; CHECK-LABEL: @vsetvli_opt_i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvli.opt.i32(i32 1, i32 1, i32 1)
+; CHECK-NEXT:    ret i32 [[TMP0]]
+;
+entry:
+  %0 = call i32 @llvm.riscv.vsetvli.opt.i32(i32 1, i32 1, i32 1)
+  %1 = and i32 %0, 2147483647
+  ret i32 %1
+}
+
+define i64 @vsetvli_opt_sext_i64() nounwind {
+; CHECK-LABEL: @vsetvli_opt_sext_i64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.opt.i64(i64 1, i64 1, i64 1)
+; CHECK-NEXT:    ret i64 [[TMP0]]
+;
+entry:
+  %0 = call i64 @llvm.riscv.vsetvli.opt.i64(i64 1, i64 1, i64 1)
+  %1 = trunc i64 %0 to i32
+  %2 = sext i32 %1 to i64
+  ret i64 %2
+}
+
+define i64 @vsetvli_opt_zext_i64() nounwind {
+; CHECK-LABEL: @vsetvli_opt_zext_i64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.opt.i64(i64 1, i64 1, i64 1)
+; CHECK-NEXT:    ret i64 [[TMP0]]
+;
+entry:
+  %0 = call i64 @llvm.riscv.vsetvli.opt.i64(i64 1, i64 1, i64 1)
+  %1 = trunc i64 %0 to i32
+  %2 = zext i32 %1 to i64
+  ret i64 %2
+}
+
+define i32 @vsetvlimax_opt_i32() nounwind {
+; CHECK-LABEL: @vsetvlimax_opt_i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.opt.i32(i32 1, i32 1)
+; CHECK-NEXT:    ret i32 [[TMP0]]
+;
+entry:
+  %0 = call i32 @llvm.riscv.vsetvlimax.opt.i32(i32 1, i32 1)
+  %1 = and i32 %0, 2147483647
+  ret i32 %1
+}
+
+define i64 @vsetvlimax_opt_sext_i64() nounwind {
+; CHECK-LABEL: @vsetvlimax_opt_sext_i64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.opt.i64(i64 1, i64 1)
+; CHECK-NEXT:    ret i64 [[TMP0]]
+;
+entry:
+  %0 = call i64 @llvm.riscv.vsetvlimax.opt.i64(i64 1, i64 1)
+  %1 = trunc i64 %0 to i32
+  %2 = sext i32 %1 to i64
+  ret i64 %2
+}
+
+define i64 @vsetvlimax_opt_zext_i64() nounwind {
+; CHECK-LABEL: @vsetvlimax_opt_zext_i64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.opt.i64(i64 1, i64 1)
+; CHECK-NEXT:    ret i64 [[TMP0]]
+;
+entry:
+  %0 = call i64 @llvm.riscv.vsetvlimax.opt.i64(i64 1, i64 1)
+  %1 = trunc i64 %0 to i32
+  %2 = zext i32 %1 to i64
+  ret i64 %2
+}