[llvm] 4ea6d42 - [RISCV] Teach computeKnownBits that vsetvli returns <= 65536.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 27 16:40:18 PST 2023


Author: Craig Topper
Date: 2023-01-27T16:39:54-08:00
New Revision: 4ea6d42b632d166e6ee2f38f6758ed71515a7f7a

URL: https://github.com/llvm/llvm-project/commit/4ea6d42b632d166e6ee2f38f6758ed71515a7f7a
DIFF: https://github.com/llvm/llvm-project/commit/4ea6d42b632d166e6ee2f38f6758ed71515a7f7a.diff

LOG: [RISCV] Teach computeKnownBits that vsetvli returns <= 65536.

Resolves a FIXME. We could do even better taking into account SEW/LMUL.

Added: 
    

Modified: 
    llvm/lib/Analysis/ValueTracking.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll
    llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index e0af42d4cc308..59bd6d07ea848 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -1738,10 +1738,10 @@ static void computeKnownBitsFromOperator(const Operator *I,
       case Intrinsic::riscv_vsetvli_opt:
       case Intrinsic::riscv_vsetvlimax:
       case Intrinsic::riscv_vsetvlimax_opt:
-        // Assume that VL output is positive and would fit in an int32_t.
-        // TODO: VLEN might be capped at 16 bits in a future V spec update.
-        if (BitWidth >= 32)
-          Known.Zero.setBitsFrom(31);
+        // Assume that VL output is <= 65536.
+        // TODO: Take SEW and LMUL into account.
+        if (BitWidth > 17)
+          Known.Zero.setBitsFrom(17);
         break;
       case Intrinsic::vscale: {
         if (!II->getParent() || !II->getFunction() ||

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index a8720d070acba..f7c31520b065a 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -10887,10 +10887,10 @@ void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
     case Intrinsic::riscv_vsetvlimax:
     case Intrinsic::riscv_vsetvli_opt:
     case Intrinsic::riscv_vsetvlimax_opt:
-      // Assume that VL output is positive and would fit in an int32_t.
-      // TODO: VLEN might be capped at 16 bits in a future V spec update.
-      if (BitWidth >= 32)
-        Known.Zero.setBitsFrom(31);
+      // Assume that VL output is <= 65536.
+      // TODO: Take SEW and LMUL into account.
+      if (BitWidth > 17)
+        Known.Zero.setBitsFrom(17);
       break;
     }
     break;

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll
index 64b73fc19c549..9ea6a7c3ae3a4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll
@@ -23,3 +23,13 @@ define zeroext i32 @vsetvl_zext() {
   %b = trunc i64 %a to i32
   ret i32 %b
 }
+
+define i64 @vsetvl_and17bits() {
+; CHECK-LABEL: vsetvl_and17bits:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a0, 1, e16, m2, ta, mu
+; CHECK-NEXT:    ret
+  %a = call i64 @llvm.riscv.vsetvli(i64 1, i64 1, i64 1)
+  %b = and i64 %a, 131071
+  ret i64 %b
+}

diff --git a/llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll b/llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll
index 3e3be9b17596c..a1eea5bcc2dd0 100644
--- a/llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll
+++ b/llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll
@@ -48,6 +48,30 @@ entry:
   ret i64 %2
 }
 
+define i32 @vsetvli_and17_i32() nounwind {
+; CHECK-LABEL: @vsetvli_and17_i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvli.i32(i32 1, i32 1, i32 1)
+; CHECK-NEXT:    ret i32 [[TMP0]]
+;
+entry:
+  %0 = call i32 @llvm.riscv.vsetvli.i32(i32 1, i32 1, i32 1)
+  %1 = and i32 %0, 131071
+  ret i32 %1
+}
+
+define i64 @vsetvli_and17_i64() nounwind {
+; CHECK-LABEL: @vsetvli_and17_i64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 1, i64 1, i64 1)
+; CHECK-NEXT:    ret i64 [[TMP0]]
+;
+entry:
+  %0 = call i64 @llvm.riscv.vsetvli.i64(i64 1, i64 1, i64 1)
+  %1 = and i64 %0, 131071
+  ret i64 %1
+}
+
 define i32 @vsetvlimax_i32() nounwind {
 ; CHECK-LABEL: @vsetvlimax_i32(
 ; CHECK-NEXT:  entry:
@@ -86,6 +110,30 @@ entry:
   ret i64 %2
 }
 
+define i32 @vsetvlimax_and17_i32() nounwind {
+; CHECK-LABEL: @vsetvlimax_and17_i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.i32(i32 1, i32 1)
+; CHECK-NEXT:    ret i32 [[TMP0]]
+;
+entry:
+  %0 = call i32 @llvm.riscv.vsetvlimax.i32(i32 1, i32 1)
+  %1 = and i32 %0, 131071
+  ret i32 %1
+}
+
+define i64 @vsetvlimax_and17_i64() nounwind {
+; CHECK-LABEL: @vsetvlimax_and17_i64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 1, i64 1)
+; CHECK-NEXT:    ret i64 [[TMP0]]
+;
+entry:
+  %0 = call i64 @llvm.riscv.vsetvlimax.i64(i64 1, i64 1)
+  %1 = and i64 %0, 131071
+  ret i64 %1
+}
+
 define i32 @vsetvli_opt_i32() nounwind {
 ; CHECK-LABEL: @vsetvli_opt_i32(
 ; CHECK-NEXT:  entry:
@@ -124,6 +172,30 @@ entry:
   ret i64 %2
 }
 
+define i32 @vsetvli_opt_and17_i32() nounwind {
+; CHECK-LABEL: @vsetvli_opt_and17_i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvli.opt.i32(i32 1, i32 1, i32 1)
+; CHECK-NEXT:    ret i32 [[TMP0]]
+;
+entry:
+  %0 = call i32 @llvm.riscv.vsetvli.opt.i32(i32 1, i32 1, i32 1)
+  %1 = and i32 %0, 131071
+  ret i32 %1
+}
+
+define i64 @vsetvli_opt_and17_i64() nounwind {
+; CHECK-LABEL: @vsetvli_opt_and17_i64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.opt.i64(i64 1, i64 1, i64 1)
+; CHECK-NEXT:    ret i64 [[TMP0]]
+;
+entry:
+  %0 = call i64 @llvm.riscv.vsetvli.opt.i64(i64 1, i64 1, i64 1)
+  %1 = and i64 %0, 131071
+  ret i64 %1
+}
+
 define i32 @vsetvlimax_opt_i32() nounwind {
 ; CHECK-LABEL: @vsetvlimax_opt_i32(
 ; CHECK-NEXT:  entry:
@@ -161,3 +233,27 @@ entry:
   %2 = zext i32 %1 to i64
   ret i64 %2
 }
+
+define i32 @vsetvlimax_opt_and17_i32() nounwind {
+; CHECK-LABEL: @vsetvlimax_opt_and17_i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.opt.i32(i32 1, i32 1)
+; CHECK-NEXT:    ret i32 [[TMP0]]
+;
+entry:
+  %0 = call i32 @llvm.riscv.vsetvlimax.opt.i32(i32 1, i32 1)
+  %1 = and i32 %0, 131071
+  ret i32 %1
+}
+
+define i64 @vsetvlimax_opt_and17_i64() nounwind {
+; CHECK-LABEL: @vsetvlimax_opt_and17_i64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.opt.i64(i64 1, i64 1)
+; CHECK-NEXT:    ret i64 [[TMP0]]
+;
+entry:
+  %0 = call i64 @llvm.riscv.vsetvlimax.opt.i64(i64 1, i64 1)
+  %1 = and i64 %0, 131071
+  ret i64 %1
+}


        


More information about the llvm-commits mailing list