[llvm] 9b92f70 - Revert "Reland "[TargetLowering] Teach DemandedBits about VSCALE""

Saleem Abdulrasool via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 19 10:53:03 PST 2022


Author: Saleem Abdulrasool
Date: 2022-12-19T18:52:29Z
New Revision: 9b92f70d4758f75903ce93feaba5098130820d40

URL: https://github.com/llvm/llvm-project/commit/9b92f70d4758f75903ce93feaba5098130820d40
DIFF: https://github.com/llvm/llvm-project/commit/9b92f70d4758f75903ce93feaba5098130820d40.diff

LOG: Revert "Reland "[TargetLowering] Teach DemandedBits about VSCALE""

This reverts commit 3010f60381bcd828d1b409cfaa576328bcd05bbc.

This change introduced undefined behaviour (reported at
https://reviews.llvm.org/D138508#inline-1352840).  Additionally, it
appears to be responsible for a mis-compilation on RISCV64 with the
vector extension (https://github.com/llvm/llvm-project/issues/59594).
The commit message indicates that this is meant to be ARM64 specific,
though it is a generic selection change.

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
    llvm/test/CodeGen/AArch64/vscale-and-sve-cnt-demandedbits.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 84cf6e9c02772..aa7d7296cd65c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -1125,23 +1125,6 @@ bool TargetLowering::SimplifyDemandedBits(
 
   KnownBits Known2;
   switch (Op.getOpcode()) {
-  case ISD::VSCALE: {
-    Function const &F = TLO.DAG.getMachineFunction().getFunction();
-    Attribute const &Attr = F.getFnAttribute(Attribute::VScaleRange);
-    if (!Attr.isValid())
-      return false;
-    std::optional<unsigned> MaxVScale = Attr.getVScaleRangeMax();
-    if (!MaxVScale.has_value())
-      return false;
-    APInt VScaleResultUpperbound = *MaxVScale * Op.getConstantOperandAPInt(0);
-    bool Negative = VScaleResultUpperbound.isNegative();
-    if (Negative)
-      VScaleResultUpperbound = ~VScaleResultUpperbound;
-    unsigned RequiredBits = VScaleResultUpperbound.getActiveBits();
-    if (RequiredBits < BitWidth)
-      (Negative ? Known.One : Known.Zero).setHighBits(BitWidth - RequiredBits);
-    return false;
-  }
   case ISD::SCALAR_TO_VECTOR: {
     if (VT.isScalableVector())
       return false;

diff  --git a/llvm/test/CodeGen/AArch64/vscale-and-sve-cnt-demandedbits.ll b/llvm/test/CodeGen/AArch64/vscale-and-sve-cnt-demandedbits.ll
index dbdab799c8352..895f5da9a1e13 100644
--- a/llvm/test/CodeGen/AArch64/vscale-and-sve-cnt-demandedbits.ll
+++ b/llvm/test/CodeGen/AArch64/vscale-and-sve-cnt-demandedbits.ll
@@ -14,8 +14,9 @@ define i32 @vscale_and_elimination() vscale_range(1,16) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #1
 ; CHECK-NEXT:    lsr x8, x8, #4
-; CHECK-NEXT:    and w9, w8, #0x1c
-; CHECK-NEXT:    add w0, w8, w9
+; CHECK-NEXT:    and w9, w8, #0x1f
+; CHECK-NEXT:    and w8, w8, #0xfffffffc
+; CHECK-NEXT:    add w0, w9, w8
 ; CHECK-NEXT:    ret
   %vscale = call i32 @llvm.vscale.i32()
   %and_redundant = and i32 %vscale, 31
@@ -84,7 +85,8 @@ define i64 @vscale_trunc_zext() vscale_range(1,16) {
 ; CHECK-LABEL: vscale_trunc_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #1
-; CHECK-NEXT:    lsr x0, x8, #4
+; CHECK-NEXT:    lsr x8, x8, #4
+; CHECK-NEXT:    and x0, x8, #0xffffffff
 ; CHECK-NEXT:    ret
   %vscale = call i32 @llvm.vscale.i32()
   %zext = zext i32 %vscale to i64
@@ -95,7 +97,8 @@ define i64 @vscale_trunc_sext() vscale_range(1,16) {
 ; CHECK-LABEL: vscale_trunc_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #1
-; CHECK-NEXT:    lsr x0, x8, #4
+; CHECK-NEXT:    lsr x8, x8, #4
+; CHECK-NEXT:    sxtw x0, w8
 ; CHECK-NEXT:    ret
   %vscale = call i32 @llvm.vscale.i32()
   %sext = sext i32 %vscale to i64
@@ -197,8 +200,9 @@ define i32 @vscale_with_multiplier() vscale_range(1,16) {
 ; CHECK-NEXT:    mov w9, #5
 ; CHECK-NEXT:    lsr x8, x8, #4
 ; CHECK-NEXT:    mul x8, x8, x9
-; CHECK-NEXT:    and w9, w8, #0x3f
-; CHECK-NEXT:    add w0, w8, w9
+; CHECK-NEXT:    and w9, w8, #0x7f
+; CHECK-NEXT:    and w8, w8, #0x3f
+; CHECK-NEXT:    add w0, w9, w8
 ; CHECK-NEXT:    ret
   %vscale = call i32 @llvm.vscale.i32()
   %mul = mul i32 %vscale, 5
@@ -215,8 +219,9 @@ define i32 @vscale_with_negative_multiplier() vscale_range(1,16) {
 ; CHECK-NEXT:    mov x9, #-5
 ; CHECK-NEXT:    lsr x8, x8, #4
 ; CHECK-NEXT:    mul x8, x8, x9
-; CHECK-NEXT:    and w9, w8, #0xffffffc0
-; CHECK-NEXT:    add w0, w8, w9
+; CHECK-NEXT:    orr w9, w8, #0xffffff80
+; CHECK-NEXT:    and w8, w8, #0xffffffc0
+; CHECK-NEXT:    add w0, w9, w8
 ; CHECK-NEXT:    ret
   %vscale = call i32 @llvm.vscale.i32()
   %mul = mul i32 %vscale, -5
@@ -226,22 +231,6 @@ define i32 @vscale_with_negative_multiplier() vscale_range(1,16) {
   ret i32 %result
 }
 
-define i32 @pow2_vscale_with_negative_multiplier() vscale_range(1,16) {
-; CHECK-LABEL: pow2_vscale_with_negative_multiplier:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    cntd x8
-; CHECK-NEXT:    neg x8, x8
-; CHECK-NEXT:    orr w9, w8, #0xfffffff0
-; CHECK-NEXT:    add w0, w8, w9
-; CHECK-NEXT:    ret
-  %vscale = call i32 @llvm.vscale.i32()
-  %mul = mul i32 %vscale, -2
-  %or_redundant = or i32 %mul, 4294967264
-  %or_required = or i32 %mul, 4294967280
-  %result = add i32 %or_redundant, %or_required
-  ret i32 %result
-}
-
 declare i32 @llvm.vscale.i32()
 declare i64 @llvm.aarch64.sve.cntb(i32 %pattern)
 declare i64 @llvm.aarch64.sve.cnth(i32 %pattern)


        


More information about the llvm-commits mailing list