[llvm] 25391ce - [RISCV] Teach computeKnownBits that vsetvli returns number less than 2^31.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Apr 29 08:08:35 PDT 2021


Author: Craig Topper
Date: 2021-04-29T08:07:59-07:00
New Revision: 25391cec3a81c3c2c4ac3f2ce0efda961766ab07

URL: https://github.com/llvm/llvm-project/commit/25391cec3a81c3c2c4ac3f2ce0efda961766ab07
DIFF: https://github.com/llvm/llvm-project/commit/25391cec3a81c3c2c4ac3f2ce0efda961766ab07.diff

LOG: [RISCV] Teach computeKnownBits that vsetvli returns number less than 2^31.

This seems like a reasonable upper bound on VL. WG discussions for
the V spec would probably allow us to use 2^16 as an upper bound
on VLEN, but this is good enough for now.

This allows us to remove a sext or zext if the user happens to assign
the size_t result into an int and then uses it as a VL intrinsic
argument, which is size_t.

Reviewed By: frasercrmck, rogfer01, arcbbb

Differential Revision: https://reviews.llvm.org/D101472

Added: 
    llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll
    llvm/test/Transforms/InstCombine/RISCV/lit.local.cfg
    llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll

Modified: 
    llvm/lib/Analysis/ValueTracking.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index e864e9903d0f4..df3531ad28d6e 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -52,6 +52,7 @@
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/Intrinsics.h"
 #include "llvm/IR/IntrinsicsAArch64.h"
+#include "llvm/IR/IntrinsicsRISCV.h"
 #include "llvm/IR/IntrinsicsX86.h"
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/Metadata.h"
@@ -1639,6 +1640,13 @@ static void computeKnownBitsFromOperator(const Operator *I,
       case Intrinsic::x86_sse42_crc32_64_64:
         Known.Zero.setBitsFrom(32);
         break;
+      case Intrinsic::riscv_vsetvli:
+      case Intrinsic::riscv_vsetvlimax:
+        // Assume that VL output is positive and would fit in an int32_t.
+        // TODO: VLEN might be capped at 16 bits in a future V spec update.
+        if (BitWidth >= 32)
+          Known.Zero.setBitsFrom(31);
+        break;
       }
     }
     break;

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 712b8f39e35c0..f0d8757703763 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5784,6 +5784,22 @@ void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
     // We assume VLENB is at least 16 bytes.
     Known.Zero.setLowBits(4);
     break;
+  case ISD::INTRINSIC_W_CHAIN: {
+    unsigned IntNo = Op.getConstantOperandVal(1);
+    switch (IntNo) {
+    default:
+      // We can't do anything for most intrinsics.
+      break;
+    case Intrinsic::riscv_vsetvli:
+    case Intrinsic::riscv_vsetvlimax:
+      // Assume that VL output is positive and would fit in an int32_t.
+      // TODO: VLEN might be capped at 16 bits in a future V spec update.
+      if (BitWidth >= 32)
+        Known.Zero.setBitsFrom(31);
+      break;
+    }
+    break;
+  }
   }
 }
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll
new file mode 100644
index 0000000000000..6f519d8aa9567
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv64 -mattr=+experimental-v | FileCheck %s
+
+declare i64 @llvm.riscv.vsetvli(
+  i64, i64, i64);
+
+define signext i32 @vsetvl_sext() {
+; CHECK-LABEL: vsetvl_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
+; CHECK-NEXT:    ret
+  %a = call i64 @llvm.riscv.vsetvli(i64 1, i64 1, i64 1)
+  %b = trunc i64 %a to i32
+  ret i32 %b
+}
+
+define zeroext i32 @vsetvl_zext() {
+; CHECK-LABEL: vsetvl_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a0, 1, e16,m2,ta,mu
+; CHECK-NEXT:    ret
+  %a = call i64 @llvm.riscv.vsetvli(i64 1, i64 1, i64 1)
+  %b = trunc i64 %a to i32
+  ret i32 %b
+}

diff --git a/llvm/test/Transforms/InstCombine/RISCV/lit.local.cfg b/llvm/test/Transforms/InstCombine/RISCV/lit.local.cfg
new file mode 100644
index 0000000000000..c63820126f8ca
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/RISCV/lit.local.cfg
@@ -0,0 +1,2 @@
+if not 'RISCV' in config.root.targets:
+    config.unsupported = True

diff --git a/llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll b/llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll
new file mode 100644
index 0000000000000..f18604032c334
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll
@@ -0,0 +1,43 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+declare i32 @llvm.riscv.vsetvli.i32(i32, i32, i32)
+declare i64 @llvm.riscv.vsetvli.i64(i64, i64, i64)
+
+define i32 @vsetvli_i32() nounwind {
+; CHECK-LABEL: @vsetvli_i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvli.i32(i32 1, i32 1, i32 1)
+; CHECK-NEXT:    ret i32 [[TMP0]]
+;
+entry:
+  %0 = call i32 @llvm.riscv.vsetvli.i32(i32 1, i32 1, i32 1)
+  %1 = and i32 %0, 2147483647
+  ret i32 %1
+}
+
+define i64 @vsetvli_sext_i64() nounwind {
+; CHECK-LABEL: @vsetvli_sext_i64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 1, i64 1, i64 1)
+; CHECK-NEXT:    ret i64 [[TMP0]]
+;
+entry:
+  %0 = call i64 @llvm.riscv.vsetvli.i64(i64 1, i64 1, i64 1)
+  %1 = trunc i64 %0 to i32
+  %2 = sext i32 %1 to i64
+  ret i64 %2
+}
+
+define i64 @vsetvli_zext_i64() nounwind {
+; CHECK-LABEL: @vsetvli_zext_i64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 1, i64 1, i64 1)
+; CHECK-NEXT:    ret i64 [[TMP0]]
+;
+entry:
+  %0 = call i64 @llvm.riscv.vsetvli.i64(i64 1, i64 1, i64 1)
+  %1 = trunc i64 %0 to i32
+  %2 = zext i32 %1 to i64
+  ret i64 %2
+}


        


More information about the llvm-commits mailing list