[llvm] 0f6b046 - [DAG] SimplifyDemandedBits - relax "xor (X >> ShiftC), XorC --> (not X) >> ShiftC" to match only demanded bits

Simon Pilgrim via llvm-commits <llvm-commits@lists.llvm.org>
Tue Jul 19 03:01:06 PDT 2022


Author: Simon Pilgrim
Date: 2022-07-19T10:59:07+01:00
New Revision: 0f6b0461b01dfb1be4b359a1b9b328e1085f81d8

URL: https://github.com/llvm/llvm-project/commit/0f6b0461b01dfb1be4b359a1b9b328e1085f81d8
DIFF: https://github.com/llvm/llvm-project/commit/0f6b0461b01dfb1be4b359a1b9b328e1085f81d8.diff

LOG: [DAG] SimplifyDemandedBits - relax "xor (X >> ShiftC), XorC --> (not X) >> ShiftC" to match only demanded bits

The "xor (X >> ShiftC), XorC --> (not X) >> ShiftC" fold is currently limited to the XOR mask being a shifted all-bits mask, but we can relax this to only need to match under the demanded bits.

This helps expose more bit extraction/clearing patterns, and fixes the PowerPC testCompares*.ll regressions from D127115.

Alive2: https://alive2.llvm.org/ce/z/fl7T7K

Differential Revision: https://reviews.llvm.org/D129933

Added: 
    

Modified: 
    llvm/include/llvm/CodeGen/TargetLowering.h
    llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/lib/Target/AArch64/AArch64ISelLowering.h
    llvm/lib/Target/ARM/ARMISelLowering.cpp
    llvm/lib/Target/ARM/ARMISelLowering.h
    llvm/test/CodeGen/Mips/bittest.ll
    llvm/test/CodeGen/RISCV/bittest.ll
    llvm/test/CodeGen/X86/speculative-load-hardening.ll

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 815f0a2324b28..1bb2a8e50c075 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -3886,6 +3886,14 @@ class TargetLowering : public TargetLoweringBase {
     return true;
   }
 
+  /// Return true if it is profitable to combine an XOR of a logical shift
+  /// to create a logical shift of NOT. This transformation may not be desirable
+  /// if it disrupts a particularly auspicious target-specific tree (e.g.
+  /// BIC on ARM/AArch64). By default, it returns true.
+  virtual bool isDesirableToCommuteXorWithShift(const SDNode *N) const {
+    return true;
+  }
+
   /// Return true if the target has native support for the specified value type
   /// and it is 'desirable' to use the type for the given node type. e.g. On x86
   /// i16 is legal, but undesirable since i16 instruction encodings are longer

diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 7c273bfa88e55..c312fca616a57 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -1533,8 +1533,10 @@ bool TargetLowering::SimplifyDemandedBits(
             APInt Ones = APInt::getAllOnes(BitWidth);
             Ones = Op0Opcode == ISD::SHL ? Ones.shl(ShiftAmt)
                                          : Ones.lshr(ShiftAmt);
-            if (C->getAPIntValue() == Ones) {
-              // If the xor constant is a shifted -1, do a 'not' before the
+            const TargetLowering &TLI = TLO.DAG.getTargetLoweringInfo();
+            if ((DemandedBits & C->getAPIntValue()) == (DemandedBits & Ones) &&
+                TLI.isDesirableToCommuteXorWithShift(Op.getNode())) {
+              // If the xor constant is a demanded mask, do a 'not' before the
               // shift:
               // xor (X << ShiftC), XorC --> (not X) << ShiftC
               // xor (X >> ShiftC), XorC --> (not X) >> ShiftC

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 511351df79ea0..cb20554e6a6be 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -13613,6 +13613,30 @@ AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
   return true;
 }
 
+bool AArch64TargetLowering::isDesirableToCommuteXorWithShift(
+    const SDNode *N) const {
+  assert(N->getOpcode() == ISD::XOR &&
+         (N->getOperand(0).getOpcode() == ISD::SHL ||
+          N->getOperand(0).getOpcode() == ISD::SRL) &&
+         "Expected XOR(SHIFT) pattern");
+
+  // Only commute if the entire NOT mask is a hidden shifted mask.
+  auto *XorC = dyn_cast<ConstantSDNode>(N->getOperand(1));
+  auto *ShiftC = dyn_cast<ConstantSDNode>(N->getOperand(0).getOperand(1));
+  if (XorC && ShiftC) {
+    unsigned MaskIdx, MaskLen;
+    if (XorC->getAPIntValue().isShiftedMask(MaskIdx, MaskLen)) {
+      unsigned ShiftAmt = ShiftC->getZExtValue();
+      unsigned BitWidth = N->getValueType(0).getScalarSizeInBits();
+      if (N->getOperand(0).getOpcode() == ISD::SHL)
+        return MaskIdx == ShiftAmt && MaskLen == (BitWidth - ShiftAmt);
+      return MaskIdx == 0 && MaskLen == (BitWidth - ShiftAmt);
+    }
+  }
+
+  return false;
+}
+
 bool AArch64TargetLowering::shouldFoldConstantShiftPairToMask(
     const SDNode *N, CombineLevel Level) const {
   assert(((N->getOpcode() == ISD::SHL &&

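The condition tested above (and mirrored in the identical ARM override further down) only permits the commute when the XOR constant is exactly the strict shifted all-ones form, so partial masks keep their bit-extraction/BIC-friendly shape. A small hedged illustration with llvm::APInt, using example values of my own choosing (isShiftedMask itself is the real API used in the diff):

    // Illustration of the isShiftedMask condition (assumed values, not
    // taken from the diff); built against LLVM's APInt.
    #include "llvm/ADT/APInt.h"
    #include <cassert>

    int main() {
      using llvm::APInt;
      unsigned MaskIdx = 0, MaskLen = 0;

      // i32 srl by 16: the strict mask is the low 16 bits, 0x0000FFFF.
      // MaskIdx == 0 and MaskLen == 32 - 16, so the override returns true.
      APInt Full(32, 0x0000FFFF);
      assert(Full.isShiftedMask(MaskIdx, MaskLen) && MaskIdx == 0 &&
             MaskLen == 16);

      // A partial mask like 0x000000FF gives MaskLen == 8 != 32 - 16, so
      // the override returns false and the extraction pattern is kept.
      APInt Partial(32, 0x000000FF);
      assert(Partial.isShiftedMask(MaskIdx, MaskLen) && MaskLen == 8);
      return 0;
    }
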
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index fcff5e04df948..f59692bd8e7af 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -657,6 +657,9 @@ class AArch64TargetLowering : public TargetLowering {
   bool isDesirableToCommuteWithShift(const SDNode *N,
                                      CombineLevel Level) const override;
 
+  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
+  bool isDesirableToCommuteXorWithShift(const SDNode *N) const override;
+
   /// Return true if it is profitable to fold a pair of shifts into a mask.
   bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                          CombineLevel Level) const override;

diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index fcc79a380baae..f34fc70f55729 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -13609,6 +13609,30 @@ ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
   return false;
 }
 
+bool ARMTargetLowering::isDesirableToCommuteXorWithShift(
+    const SDNode *N) const {
+  assert(N->getOpcode() == ISD::XOR &&
+         (N->getOperand(0).getOpcode() == ISD::SHL ||
+          N->getOperand(0).getOpcode() == ISD::SRL) &&
+         "Expected XOR(SHIFT) pattern");
+
+  // Only commute if the entire NOT mask is a hidden shifted mask.
+  auto *XorC = dyn_cast<ConstantSDNode>(N->getOperand(1));
+  auto *ShiftC = dyn_cast<ConstantSDNode>(N->getOperand(0).getOperand(1));
+  if (XorC && ShiftC) {
+    unsigned MaskIdx, MaskLen;
+    if (XorC->getAPIntValue().isShiftedMask(MaskIdx, MaskLen)) {
+      unsigned ShiftAmt = ShiftC->getZExtValue();
+      unsigned BitWidth = N->getValueType(0).getScalarSizeInBits();
+      if (N->getOperand(0).getOpcode() == ISD::SHL)
+        return MaskIdx == ShiftAmt && MaskLen == (BitWidth - ShiftAmt);
+      return MaskIdx == 0 && MaskLen == (BitWidth - ShiftAmt);
+    }
+  }
+
+  return false;
+}
+
 bool ARMTargetLowering::shouldFoldConstantShiftPairToMask(
     const SDNode *N, CombineLevel Level) const {
   assert(((N->getOpcode() == ISD::SHL &&

diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index 10f60ab93ae35..fae279ea7569e 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -733,6 +733,8 @@ class VectorType;
     bool isDesirableToCommuteWithShift(const SDNode *N,
                                        CombineLevel Level) const override;
 
+    bool isDesirableToCommuteXorWithShift(const SDNode *N) const override;
+
     bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                            CombineLevel Level) const override;
 

diff --git a/llvm/test/CodeGen/Mips/bittest.ll b/llvm/test/CodeGen/Mips/bittest.ll
index 3e6be4e160760..167140c83b066 100644
--- a/llvm/test/CodeGen/Mips/bittest.ll
+++ b/llvm/test/CodeGen/Mips/bittest.ll
@@ -149,29 +149,27 @@ define signext i32 @bittest_15_i32(i32 signext %a) nounwind {
 define signext i32 @bittest_16_i32(i32 signext %a) nounwind {
 ; MIPS-LABEL: bittest_16_i32:
 ; MIPS:       # %bb.0:
-; MIPS-NEXT:    srl $1, $4, 16
-; MIPS-NEXT:    not $1, $1
+; MIPS-NEXT:    not $1, $4
+; MIPS-NEXT:    srl $1, $1, 16
 ; MIPS-NEXT:    jr $ra
 ; MIPS-NEXT:    andi $2, $1, 1
 ;
 ; MIPS32R2-LABEL: bittest_16_i32:
 ; MIPS32R2:       # %bb.0:
-; MIPS32R2-NEXT:    srl $1, $4, 16
-; MIPS32R2-NEXT:    not $1, $1
+; MIPS32R2-NEXT:    not $1, $4
 ; MIPS32R2-NEXT:    jr $ra
-; MIPS32R2-NEXT:    andi $2, $1, 1
+; MIPS32R2-NEXT:    ext $2, $1, 16, 1
 ;
 ; MIPS32R6-LABEL: bittest_16_i32:
 ; MIPS32R6:       # %bb.0:
-; MIPS32R6-NEXT:    srl $1, $4, 16
-; MIPS32R6-NEXT:    not $1, $1
+; MIPS32R6-NEXT:    not $1, $4
 ; MIPS32R6-NEXT:    jr $ra
-; MIPS32R6-NEXT:    andi $2, $1, 1
+; MIPS32R6-NEXT:    ext $2, $1, 16, 1
 ;
 ; MIPS64-LABEL: bittest_16_i32:
 ; MIPS64:       # %bb.0:
-; MIPS64-NEXT:    srl $1, $4, 16
-; MIPS64-NEXT:    not $1, $1
+; MIPS64-NEXT:    not $1, $4
+; MIPS64-NEXT:    srl $1, $1, 16
 ; MIPS64-NEXT:    andi $1, $1, 1
 ; MIPS64-NEXT:    dsll $1, $1, 32
 ; MIPS64-NEXT:    jr $ra
@@ -179,32 +177,28 @@ define signext i32 @bittest_16_i32(i32 signext %a) nounwind {
 ;
 ; MIPS64R2-LABEL: bittest_16_i32:
 ; MIPS64R2:       # %bb.0:
-; MIPS64R2-NEXT:    srl $1, $4, 16
-; MIPS64R2-NEXT:    not $1, $1
-; MIPS64R2-NEXT:    andi $1, $1, 1
+; MIPS64R2-NEXT:    not $1, $4
+; MIPS64R2-NEXT:    ext $1, $1, 16, 1
 ; MIPS64R2-NEXT:    jr $ra
 ; MIPS64R2-NEXT:    dext $2, $1, 0, 32
 ;
 ; MIPS64R6-LABEL: bittest_16_i32:
 ; MIPS64R6:       # %bb.0:
-; MIPS64R6-NEXT:    srl $1, $4, 16
-; MIPS64R6-NEXT:    not $1, $1
-; MIPS64R6-NEXT:    andi $1, $1, 1
+; MIPS64R6-NEXT:    not $1, $4
+; MIPS64R6-NEXT:    ext $1, $1, 16, 1
 ; MIPS64R6-NEXT:    jr $ra
 ; MIPS64R6-NEXT:    dext $2, $1, 0, 32
 ;
 ; MM32R3-LABEL: bittest_16_i32:
 ; MM32R3:       # %bb.0:
-; MM32R3-NEXT:    srl $2, $4, 16
-; MM32R3-NEXT:    not16 $2, $2
-; MM32R3-NEXT:    andi16 $2, $2, 1
-; MM32R3-NEXT:    jrc $ra
+; MM32R3-NEXT:    not16 $2, $4
+; MM32R3-NEXT:    jr $ra
+; MM32R3-NEXT:    ext $2, $2, 16, 1
 ;
 ; MM32R6-LABEL: bittest_16_i32:
 ; MM32R6:       # %bb.0:
-; MM32R6-NEXT:    srl $2, $4, 16
-; MM32R6-NEXT:    not16 $2, $2
-; MM32R6-NEXT:    andi16 $2, $2, 1
+; MM32R6-NEXT:    not16 $2, $4
+; MM32R6-NEXT:    ext $2, $2, 16, 1
 ; MM32R6-NEXT:    jrc $ra
   %shr = lshr i32 %a, 16
   %not = xor i32 %shr, -1
@@ -399,65 +393,59 @@ define i64 @bittest_15_i64(i64 %a) nounwind {
 define i64 @bittest_16_i64(i64 %a) nounwind {
 ; MIPS-LABEL: bittest_16_i64:
 ; MIPS:       # %bb.0:
-; MIPS-NEXT:    srl $1, $5, 16
-; MIPS-NEXT:    not $1, $1
+; MIPS-NEXT:    not $1, $5
+; MIPS-NEXT:    srl $1, $1, 16
 ; MIPS-NEXT:    andi $3, $1, 1
 ; MIPS-NEXT:    jr $ra
 ; MIPS-NEXT:    addiu $2, $zero, 0
 ;
 ; MIPS32R2-LABEL: bittest_16_i64:
 ; MIPS32R2:       # %bb.0:
-; MIPS32R2-NEXT:    srl $1, $5, 16
-; MIPS32R2-NEXT:    not $1, $1
-; MIPS32R2-NEXT:    andi $3, $1, 1
+; MIPS32R2-NEXT:    not $1, $5
+; MIPS32R2-NEXT:    ext $3, $1, 16, 1
 ; MIPS32R2-NEXT:    jr $ra
 ; MIPS32R2-NEXT:    addiu $2, $zero, 0
 ;
 ; MIPS32R6-LABEL: bittest_16_i64:
 ; MIPS32R6:       # %bb.0:
-; MIPS32R6-NEXT:    srl $1, $5, 16
-; MIPS32R6-NEXT:    not $1, $1
-; MIPS32R6-NEXT:    andi $3, $1, 1
+; MIPS32R6-NEXT:    not $1, $5
+; MIPS32R6-NEXT:    ext $3, $1, 16, 1
 ; MIPS32R6-NEXT:    jr $ra
 ; MIPS32R6-NEXT:    addiu $2, $zero, 0
 ;
 ; MIPS64-LABEL: bittest_16_i64:
 ; MIPS64:       # %bb.0:
-; MIPS64-NEXT:    dsrl $1, $4, 16
-; MIPS64-NEXT:    daddiu $2, $zero, -1
-; MIPS64-NEXT:    xor $1, $1, $2
+; MIPS64-NEXT:    daddiu $1, $zero, -1
+; MIPS64-NEXT:    xor $1, $4, $1
+; MIPS64-NEXT:    dsrl $1, $1, 16
 ; MIPS64-NEXT:    jr $ra
 ; MIPS64-NEXT:    andi $2, $1, 1
 ;
 ; MIPS64R2-LABEL: bittest_16_i64:
 ; MIPS64R2:       # %bb.0:
-; MIPS64R2-NEXT:    dsrl $1, $4, 16
-; MIPS64R2-NEXT:    daddiu $2, $zero, -1
-; MIPS64R2-NEXT:    xor $1, $1, $2
+; MIPS64R2-NEXT:    daddiu $1, $zero, -1
+; MIPS64R2-NEXT:    xor $1, $4, $1
 ; MIPS64R2-NEXT:    jr $ra
-; MIPS64R2-NEXT:    andi $2, $1, 1
+; MIPS64R2-NEXT:    dext $2, $1, 16, 1
 ;
 ; MIPS64R6-LABEL: bittest_16_i64:
 ; MIPS64R6:       # %bb.0:
-; MIPS64R6-NEXT:    dsrl $1, $4, 16
-; MIPS64R6-NEXT:    daddiu $2, $zero, -1
-; MIPS64R6-NEXT:    xor $1, $1, $2
+; MIPS64R6-NEXT:    daddiu $1, $zero, -1
+; MIPS64R6-NEXT:    xor $1, $4, $1
 ; MIPS64R6-NEXT:    jr $ra
-; MIPS64R6-NEXT:    andi $2, $1, 1
+; MIPS64R6-NEXT:    dext $2, $1, 16, 1
 ;
 ; MM32R3-LABEL: bittest_16_i64:
 ; MM32R3:       # %bb.0:
-; MM32R3-NEXT:    srl $2, $5, 16
-; MM32R3-NEXT:    not16 $2, $2
-; MM32R3-NEXT:    andi16 $3, $2, 1
+; MM32R3-NEXT:    not16 $2, $5
+; MM32R3-NEXT:    ext $3, $2, 16, 1
 ; MM32R3-NEXT:    li16 $2, 0
 ; MM32R3-NEXT:    jrc $ra
 ;
 ; MM32R6-LABEL: bittest_16_i64:
 ; MM32R6:       # %bb.0:
-; MM32R6-NEXT:    srl $2, $5, 16
-; MM32R6-NEXT:    not16 $2, $2
-; MM32R6-NEXT:    andi16 $3, $2, 1
+; MM32R6-NEXT:    not16 $2, $5
+; MM32R6-NEXT:    ext $3, $2, 16, 1
 ; MM32R6-NEXT:    li16 $2, 0
 ; MM32R6-NEXT:    jrc $ra
   %shr = lshr i64 %a, 16
@@ -490,27 +478,25 @@ define i64 @bittest_31_i64(i64 %a) nounwind {
 ;
 ; MIPS64-LABEL: bittest_31_i64:
 ; MIPS64:       # %bb.0:
-; MIPS64-NEXT:    dsrl $1, $4, 31
-; MIPS64-NEXT:    daddiu $2, $zero, -1
-; MIPS64-NEXT:    xor $1, $1, $2
+; MIPS64-NEXT:    daddiu $1, $zero, -1
+; MIPS64-NEXT:    xor $1, $4, $1
+; MIPS64-NEXT:    dsrl $1, $1, 31
 ; MIPS64-NEXT:    jr $ra
 ; MIPS64-NEXT:    andi $2, $1, 1
 ;
 ; MIPS64R2-LABEL: bittest_31_i64:
 ; MIPS64R2:       # %bb.0:
-; MIPS64R2-NEXT:    dsrl $1, $4, 31
-; MIPS64R2-NEXT:    daddiu $2, $zero, -1
-; MIPS64R2-NEXT:    xor $1, $1, $2
+; MIPS64R2-NEXT:    daddiu $1, $zero, -1
+; MIPS64R2-NEXT:    xor $1, $4, $1
 ; MIPS64R2-NEXT:    jr $ra
-; MIPS64R2-NEXT:    andi $2, $1, 1
+; MIPS64R2-NEXT:    dext $2, $1, 31, 1
 ;
 ; MIPS64R6-LABEL: bittest_31_i64:
 ; MIPS64R6:       # %bb.0:
-; MIPS64R6-NEXT:    dsrl $1, $4, 31
-; MIPS64R6-NEXT:    daddiu $2, $zero, -1
-; MIPS64R6-NEXT:    xor $1, $1, $2
+; MIPS64R6-NEXT:    daddiu $1, $zero, -1
+; MIPS64R6-NEXT:    xor $1, $4, $1
 ; MIPS64R6-NEXT:    jr $ra
-; MIPS64R6-NEXT:    andi $2, $1, 1
+; MIPS64R6-NEXT:    dext $2, $1, 31, 1
 ;
 ; MM32R3-LABEL: bittest_31_i64:
 ; MM32R3:       # %bb.0:
@@ -555,27 +541,25 @@ define i64 @bittest_32_i64(i64 %a) nounwind {
 ;
 ; MIPS64-LABEL: bittest_32_i64:
 ; MIPS64:       # %bb.0:
-; MIPS64-NEXT:    dsrl $1, $4, 32
-; MIPS64-NEXT:    daddiu $2, $zero, -1
-; MIPS64-NEXT:    xor $1, $1, $2
+; MIPS64-NEXT:    daddiu $1, $zero, -1
+; MIPS64-NEXT:    xor $1, $4, $1
+; MIPS64-NEXT:    dsrl $1, $1, 32
 ; MIPS64-NEXT:    jr $ra
 ; MIPS64-NEXT:    andi $2, $1, 1
 ;
 ; MIPS64R2-LABEL: bittest_32_i64:
 ; MIPS64R2:       # %bb.0:
-; MIPS64R2-NEXT:    dsrl $1, $4, 32
-; MIPS64R2-NEXT:    daddiu $2, $zero, -1
-; MIPS64R2-NEXT:    xor $1, $1, $2
+; MIPS64R2-NEXT:    daddiu $1, $zero, -1
+; MIPS64R2-NEXT:    xor $1, $4, $1
 ; MIPS64R2-NEXT:    jr $ra
-; MIPS64R2-NEXT:    andi $2, $1, 1
+; MIPS64R2-NEXT:    dextu $2, $1, 32, 1
 ;
 ; MIPS64R6-LABEL: bittest_32_i64:
 ; MIPS64R6:       # %bb.0:
-; MIPS64R6-NEXT:    dsrl $1, $4, 32
-; MIPS64R6-NEXT:    daddiu $2, $zero, -1
-; MIPS64R6-NEXT:    xor $1, $1, $2
+; MIPS64R6-NEXT:    daddiu $1, $zero, -1
+; MIPS64R6-NEXT:    xor $1, $4, $1
 ; MIPS64R6-NEXT:    jr $ra
-; MIPS64R6-NEXT:    andi $2, $1, 1
+; MIPS64R6-NEXT:    dextu $2, $1, 32, 1
 ;
 ; MM32R3-LABEL: bittest_32_i64:
 ; MM32R3:       # %bb.0:

diff --git a/llvm/test/CodeGen/RISCV/bittest.ll b/llvm/test/CodeGen/RISCV/bittest.ll
index 163a3076bb449..fd31d28cba795 100644
--- a/llvm/test/CodeGen/RISCV/bittest.ll
+++ b/llvm/test/CodeGen/RISCV/bittest.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefixes=CHECK,NOZBS,RV32,RV32I
+; RUN:   | FileCheck %s -check-prefixes=CHECK,RV32,RV32I
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefixes=CHECK,NOZBS,RV64,RV64I
+; RUN:   | FileCheck %s -check-prefixes=CHECK,RV64,RV64I
 ; RUN: llc -mtriple=riscv32 -mattr=+zbs -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefixes=CHECK,ZBS,RV32,RV32ZBS
 ; RUN: llc -mtriple=riscv64 -mattr=+zbs -verify-machineinstrs < %s \
@@ -33,17 +33,24 @@ define signext i32 @bittest_10_i32(i32 signext %a) nounwind {
 }
 
 define signext i32 @bittest_11_i32(i32 signext %a) nounwind {
-; NOZBS-LABEL: bittest_11_i32:
-; NOZBS:       # %bb.0:
-; NOZBS-NEXT:    srli a0, a0, 11
-; NOZBS-NEXT:    not a0, a0
-; NOZBS-NEXT:    andi a0, a0, 1
-; NOZBS-NEXT:    ret
+; RV32I-LABEL: bittest_11_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    slli a0, a0, 20
+; RV32I-NEXT:    srli a0, a0, 31
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: bittest_11_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    slli a0, a0, 52
+; RV64I-NEXT:    srli a0, a0, 63
+; RV64I-NEXT:    ret
 ;
 ; ZBS-LABEL: bittest_11_i32:
 ; ZBS:       # %bb.0:
+; ZBS-NEXT:    not a0, a0
 ; ZBS-NEXT:    bexti a0, a0, 11
-; ZBS-NEXT:    xori a0, a0, 1
 ; ZBS-NEXT:    ret
   %shr = lshr i32 %a, 11
   %not = xor i32 %shr, -1
@@ -110,30 +117,30 @@ define i64 @bittest_10_i64(i64 %a) nounwind {
 define i64 @bittest_11_i64(i64 %a) nounwind {
 ; RV32I-LABEL: bittest_11_i64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a0, a0, 11
 ; RV32I-NEXT:    not a0, a0
-; RV32I-NEXT:    andi a0, a0, 1
+; RV32I-NEXT:    slli a0, a0, 20
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: bittest_11_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a0, a0, 11
 ; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    slli a0, a0, 52
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBS-LABEL: bittest_11_i64:
 ; RV32ZBS:       # %bb.0:
+; RV32ZBS-NEXT:    not a0, a0
 ; RV32ZBS-NEXT:    bexti a0, a0, 11
-; RV32ZBS-NEXT:    xori a0, a0, 1
 ; RV32ZBS-NEXT:    li a1, 0
 ; RV32ZBS-NEXT:    ret
 ;
 ; RV64ZBS-LABEL: bittest_11_i64:
 ; RV64ZBS:       # %bb.0:
+; RV64ZBS-NEXT:    not a0, a0
 ; RV64ZBS-NEXT:    bexti a0, a0, 11
-; RV64ZBS-NEXT:    xori a0, a0, 1
 ; RV64ZBS-NEXT:    ret
   %shr = lshr i64 %a, 11
   %not = xor i64 %shr, -1
@@ -149,18 +156,11 @@ define i64 @bittest_31_i64(i64 %a) nounwind {
 ; RV32-NEXT:    li a1, 0
 ; RV32-NEXT:    ret
 ;
-; RV64I-LABEL: bittest_31_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a0, a0, 31
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    andi a0, a0, 1
-; RV64I-NEXT:    ret
-;
-; RV64ZBS-LABEL: bittest_31_i64:
-; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    bexti a0, a0, 31
-; RV64ZBS-NEXT:    xori a0, a0, 1
-; RV64ZBS-NEXT:    ret
+; RV64-LABEL: bittest_31_i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    not a0, a0
+; RV64-NEXT:    srliw a0, a0, 31
+; RV64-NEXT:    ret
   %shr = lshr i64 %a, 31
   %not = xor i64 %shr, -1
   %and = and i64 %not, 1
@@ -177,15 +177,15 @@ define i64 @bittest_32_i64(i64 %a) nounwind {
 ;
 ; RV64I-LABEL: bittest_32_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    slli a0, a0, 31
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBS-LABEL: bittest_32_i64:
 ; RV64ZBS:       # %bb.0:
+; RV64ZBS-NEXT:    not a0, a0
 ; RV64ZBS-NEXT:    bexti a0, a0, 32
-; RV64ZBS-NEXT:    xori a0, a0, 1
 ; RV64ZBS-NEXT:    ret
   %shr = lshr i64 %a, 32
   %not = xor i64 %shr, -1

diff --git a/llvm/test/CodeGen/X86/speculative-load-hardening.ll b/llvm/test/CodeGen/X86/speculative-load-hardening.ll
index f87ef98dfe98d..315b6f2e6b248 100644
--- a/llvm/test/CodeGen/X86/speculative-load-hardening.ll
+++ b/llvm/test/CodeGen/X86/speculative-load-hardening.ll
@@ -911,8 +911,8 @@ define void @test_deferred_hardening(ptr %ptr1, ptr %ptr2, i32 %x) nounwind spec
 ; X64-NEXT:    cmpq $.Lslh_ret_addr23, %rcx
 ; X64-NEXT:    cmovneq %r15, %rax
 ; X64-NEXT:    movswl (%rbx), %edi
-; X64-NEXT:    shrl $7, %edi
 ; X64-NEXT:    notl %edi
+; X64-NEXT:    shrl $7, %edi
 ; X64-NEXT:    orl $-65536, %edi # imm = 0xFFFF0000
 ; X64-NEXT:    orl %eax, %edi
 ; X64-NEXT:    shlq $47, %rax
@@ -964,8 +964,8 @@ define void @test_deferred_hardening(ptr %ptr1, ptr %ptr2, i32 %x) nounwind spec
 ; X64-LFENCE-NEXT:    shll $7, %edi
; X64-LFENCE-NEXT:    callq sink@PLT
 ; X64-LFENCE-NEXT:    movswl (%rbx), %edi
-; X64-LFENCE-NEXT:    shrl $7, %edi
 ; X64-LFENCE-NEXT:    notl %edi
+; X64-LFENCE-NEXT:    shrl $7, %edi
 ; X64-LFENCE-NEXT:    orl $-65536, %edi # imm = 0xFFFF0000
; X64-LFENCE-NEXT:    callq sink@PLT
 ; X64-LFENCE-NEXT:    movzwl (%rbx), %eax


        

