[llvm] 0852f87 - [X86] X86DAGToDAGISel::matchBitExtract(): support 'num high bits to clear' pattern
Roman Lebedev via llvm-commits
llvm-commits at lists.llvm.org
Wed Sep 8 09:27:34 PDT 2021
Author: Roman Lebedev
Date: 2021-09-08T19:27:08+03:00
New Revision: 0852f8706b769c5e648defd40ff825a553d3cd98
URL: https://github.com/llvm/llvm-project/commit/0852f8706b769c5e648defd40ff825a553d3cd98
DIFF: https://github.com/llvm/llvm-project/commit/0852f8706b769c5e648defd40ff825a553d3cd98.diff
LOG: [X86] X86DAGToDAGISel::matchBitExtract(): support 'num high bits to clear' pattern
Currently, we only deal with the case where we can match
the number of low bits to be kept, i.e.:
```
x & ((1 << y) - 1)
```
will extract low `y` bits of `x`.
But what will
```
x & (-1 >> y)
```
do?
Logically, it will extract `bitwidth(x) - y` low bits, i.e.:
```
x & ~(-1 << (bitwidth(x)-y))
```
... except we can't do such a transformation in IR in general,
because if we wanted to extract all the bits, `(-1 >> 0)` is fine,
but `-1 << bitwidth(x)` would be `poison`: https://alive2.llvm.org/ce/z/BKJZfw.
Yet, here with BMI's BEXTR and BMI2's BZHI we don't have any such problems with edge-cases.
So what we can do is: https://alive2.llvm.org/ce/z/gm5M2B
As briefly discussed with @craig.topper, this appears to be no worse than what we'd end up with currently (a pair of shifts):
* https://godbolt.org/z/nsPb8bejs (direct data dependency, sequential execution)
* https://godbolt.org/z/7bj3zeh1d (no direct data dependency, parallel execution)
Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D107923
Added:
Modified:
llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
llvm/test/CodeGen/X86/clear-highbits.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 9171d6a00b865..1d1bbf418c2da 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -3391,16 +3391,24 @@ bool X86DAGToDAGISel::matchBitExtract(SDNode *Node) {
return false;
SDValue NBits;
+ bool NegateNBits;
// If we have BMI2's BZHI, we are ok with muti-use patterns.
// Else, if we only have BMI1's BEXTR, we require one-use.
- const bool CanHaveExtraUses = Subtarget->hasBMI2();
- auto checkUses = [CanHaveExtraUses](SDValue Op, unsigned NUses) {
- return CanHaveExtraUses ||
+ const bool AllowExtraUsesByDefault = Subtarget->hasBMI2();
+ auto checkUses = [AllowExtraUsesByDefault](SDValue Op, unsigned NUses,
+ Optional<bool> AllowExtraUses) {
+ return AllowExtraUses.getValueOr(AllowExtraUsesByDefault) ||
Op.getNode()->hasNUsesOfValue(NUses, Op.getResNo());
};
- auto checkOneUse = [checkUses](SDValue Op) { return checkUses(Op, 1); };
- auto checkTwoUse = [checkUses](SDValue Op) { return checkUses(Op, 2); };
+ auto checkOneUse = [checkUses](SDValue Op,
+ Optional<bool> AllowExtraUses = None) {
+ return checkUses(Op, 1, AllowExtraUses);
+ };
+ auto checkTwoUse = [checkUses](SDValue Op,
+ Optional<bool> AllowExtraUses = None) {
+ return checkUses(Op, 2, AllowExtraUses);
+ };
auto peekThroughOneUseTruncation = [checkOneUse](SDValue V) {
if (V->getOpcode() == ISD::TRUNCATE && checkOneUse(V)) {
@@ -3413,8 +3421,8 @@ bool X86DAGToDAGISel::matchBitExtract(SDNode *Node) {
};
// a) x & ((1 << nbits) + (-1))
- auto matchPatternA = [checkOneUse, peekThroughOneUseTruncation,
- &NBits](SDValue Mask) -> bool {
+ auto matchPatternA = [checkOneUse, peekThroughOneUseTruncation, &NBits,
+ &NegateNBits](SDValue Mask) -> bool {
// Match `add`. Must only have one use!
if (Mask->getOpcode() != ISD::ADD || !checkOneUse(Mask))
return false;
@@ -3428,6 +3436,7 @@ bool X86DAGToDAGISel::matchBitExtract(SDNode *Node) {
if (!isOneConstant(M0->getOperand(0)))
return false;
NBits = M0->getOperand(1);
+ NegateNBits = false;
return true;
};
@@ -3440,7 +3449,7 @@ bool X86DAGToDAGISel::matchBitExtract(SDNode *Node) {
// b) x & ~(-1 << nbits)
auto matchPatternB = [checkOneUse, isAllOnes, peekThroughOneUseTruncation,
- &NBits](SDValue Mask) -> bool {
+ &NBits, &NegateNBits](SDValue Mask) -> bool {
// Match `~()`. Must only have one use!
if (Mask.getOpcode() != ISD::XOR || !checkOneUse(Mask))
return false;
@@ -3455,32 +3464,35 @@ bool X86DAGToDAGISel::matchBitExtract(SDNode *Node) {
if (!isAllOnes(M0->getOperand(0)))
return false;
NBits = M0->getOperand(1);
+ NegateNBits = false;
return true;
};
- // Match potentially-truncated (bitwidth - y)
- auto matchShiftAmt = [checkOneUse, &NBits](SDValue ShiftAmt,
- unsigned Bitwidth) {
- // Skip over a truncate of the shift amount.
- if (ShiftAmt.getOpcode() == ISD::TRUNCATE) {
- ShiftAmt = ShiftAmt.getOperand(0);
- // The trunc should have been the only user of the real shift amount.
- if (!checkOneUse(ShiftAmt))
- return false;
- }
- // Match the shift amount as: (bitwidth - y). It should go away, too.
- if (ShiftAmt.getOpcode() != ISD::SUB)
- return false;
- auto *V0 = dyn_cast<ConstantSDNode>(ShiftAmt.getOperand(0));
+ // Try to match potentially-truncated shift amount as `(bitwidth - y)`,
+ // or leave the shift amount as-is, but then we'll have to negate it.
+ auto canonicalizeShiftAmt = [&NBits, &NegateNBits](SDValue ShiftAmt,
+ unsigned Bitwidth) {
+ NBits = ShiftAmt;
+ NegateNBits = true;
+ // Skip over a truncate of the shift amount, if any.
+ if (NBits.getOpcode() == ISD::TRUNCATE)
+ NBits = NBits.getOperand(0);
+ // Try to match the shift amount as (bitwidth - y). It should go away, too.
+ // If it doesn't match, that's fine, we'll just negate it ourselves.
+ if (NBits.getOpcode() != ISD::SUB)
+ return;
+ auto *V0 = dyn_cast<ConstantSDNode>(NBits.getOperand(0));
if (!V0 || V0->getZExtValue() != Bitwidth)
- return false;
- NBits = ShiftAmt.getOperand(1);
- return true;
+ return;
+ NBits = NBits.getOperand(1);
+ NegateNBits = false;
};
+ // c) x & (-1 >> z) but then we'll have to subtract z from bitwidth
+ // or
// c) x & (-1 >> (32 - y))
- auto matchPatternC = [checkOneUse, peekThroughOneUseTruncation,
- matchShiftAmt](SDValue Mask) -> bool {
+ auto matchPatternC = [checkOneUse, peekThroughOneUseTruncation, &NegateNBits,
+ canonicalizeShiftAmt](SDValue Mask) -> bool {
// The mask itself may be truncated.
Mask = peekThroughOneUseTruncation(Mask);
unsigned Bitwidth = Mask.getSimpleValueType().getSizeInBits();
@@ -3494,27 +3506,39 @@ bool X86DAGToDAGISel::matchBitExtract(SDNode *Node) {
// The shift amount should not be used externally.
if (!checkOneUse(M1))
return false;
- return matchShiftAmt(M1, Bitwidth);
+ canonicalizeShiftAmt(M1, Bitwidth);
+ // Pattern c. is non-canonical, and is expanded into pattern d. iff there
+ // is no extra use of the mask. Clearly, there was one since we are here.
+ // But at the same time, if we need to negate the shift amount,
+ // then we don't want the mask to stick around, else it's unprofitable.
+ return !NegateNBits;
};
SDValue X;
+ // d) x << z >> z but then we'll have to subtract z from bitwidth
+ // or
// d) x << (32 - y) >> (32 - y)
- auto matchPatternD = [checkOneUse, checkTwoUse, matchShiftAmt,
+ auto matchPatternD = [checkOneUse, checkTwoUse, canonicalizeShiftAmt,
+ AllowExtraUsesByDefault, &NegateNBits,
&X](SDNode *Node) -> bool {
if (Node->getOpcode() != ISD::SRL)
return false;
SDValue N0 = Node->getOperand(0);
- if (N0->getOpcode() != ISD::SHL || !checkOneUse(N0))
+ if (N0->getOpcode() != ISD::SHL)
return false;
unsigned Bitwidth = N0.getSimpleValueType().getSizeInBits();
SDValue N1 = Node->getOperand(1);
SDValue N01 = N0->getOperand(1);
// Both of the shifts must be by the exact same value.
- // There should not be any uses of the shift amount outside of the pattern.
- if (N1 != N01 || !checkTwoUse(N1))
+ if (N1 != N01)
return false;
- if (!matchShiftAmt(N1, Bitwidth))
+ canonicalizeShiftAmt(N1, Bitwidth);
+ // There should not be any external uses of the inner shift / shift amount.
+ // Note that while we are generally okay with external uses given BMI2,
+ // iff we need to negate the shift amount, we are not okay with extra uses.
+ const bool AllowExtraUses = AllowExtraUsesByDefault && !NegateNBits;
+ if (!checkOneUse(N0, AllowExtraUses) || !checkTwoUse(N1, AllowExtraUses))
return false;
X = N0->getOperand(0);
return true;
@@ -3539,6 +3563,11 @@ bool X86DAGToDAGISel::matchBitExtract(SDNode *Node) {
} else if (!matchPatternD(Node))
return false;
+ // If we need to negate the shift amount, require BMI2 BZHI support.
+ // It's just too unprofitable for BMI1 BEXTR.
+ if (NegateNBits && !Subtarget->hasBMI2())
+ return false;
+
SDLoc DL(Node);
// Truncate the shift amount.
@@ -3553,11 +3582,21 @@ bool X86DAGToDAGISel::matchBitExtract(SDNode *Node) {
SDValue SRIdxVal = CurDAG->getTargetConstant(X86::sub_8bit, DL, MVT::i32);
insertDAGNode(*CurDAG, SDValue(Node, 0), SRIdxVal);
- NBits = SDValue(
- CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::i32, ImplDef,
- NBits, SRIdxVal), 0);
+ NBits = SDValue(CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
+ MVT::i32, ImplDef, NBits, SRIdxVal),
+ 0);
insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
+ // We might have matched the amount of high bits to be cleared,
+ // but we want the amount of low bits to be kept, so negate it then.
+ if (NegateNBits) {
+ SDValue BitWidthC = CurDAG->getConstant(NVT.getSizeInBits(), DL, MVT::i32);
+ insertDAGNode(*CurDAG, SDValue(Node, 0), BitWidthC);
+
+ NBits = CurDAG->getNode(ISD::SUB, DL, MVT::i32, BitWidthC, NBits);
+ insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
+ }
+
if (Subtarget->hasBMI2()) {
// Great, just emit the the BZHI..
if (NVT != MVT::i32) {
diff --git a/llvm/test/CodeGen/X86/clear-highbits.ll b/llvm/test/CodeGen/X86/clear-highbits.ll
index 2e0025312932b..b382054bfff3a 100644
--- a/llvm/test/CodeGen/X86/clear-highbits.ll
+++ b/llvm/test/CodeGen/X86/clear-highbits.ll
@@ -335,8 +335,9 @@ define i32 @clear_highbits32_c0(i32 %val, i32 %numhighbits) nounwind {
; X86-BMI2-LABEL: clear_highbits32_c0:
; X86-BMI2: # %bb.0:
; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-BMI2-NEXT: shlxl %eax, {{[0-9]+}}(%esp), %ecx
-; X86-BMI2-NEXT: shrxl %eax, %ecx, %eax
+; X86-BMI2-NEXT: movl $32, %ecx
+; X86-BMI2-NEXT: subl %eax, %ecx
+; X86-BMI2-NEXT: bzhil %ecx, {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT: retl
;
; X64-NOBMI2-LABEL: clear_highbits32_c0:
@@ -350,8 +351,9 @@ define i32 @clear_highbits32_c0(i32 %val, i32 %numhighbits) nounwind {
;
; X64-BMI2-LABEL: clear_highbits32_c0:
; X64-BMI2: # %bb.0:
-; X64-BMI2-NEXT: shlxl %esi, %edi, %eax
-; X64-BMI2-NEXT: shrxl %esi, %eax, %eax
+; X64-BMI2-NEXT: movl $32, %eax
+; X64-BMI2-NEXT: subl %esi, %eax
+; X64-BMI2-NEXT: bzhil %eax, %edi, %eax
; X64-BMI2-NEXT: retq
%mask = lshr i32 -1, %numhighbits
%masked = and i32 %mask, %val
@@ -370,8 +372,9 @@ define i32 @clear_highbits32_c1_indexzext(i32 %val, i8 %numhighbits) nounwind {
; X86-BMI2-LABEL: clear_highbits32_c1_indexzext:
; X86-BMI2: # %bb.0:
; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-BMI2-NEXT: shlxl %eax, {{[0-9]+}}(%esp), %ecx
-; X86-BMI2-NEXT: shrxl %eax, %ecx, %eax
+; X86-BMI2-NEXT: movl $32, %ecx
+; X86-BMI2-NEXT: subl %eax, %ecx
+; X86-BMI2-NEXT: bzhil %ecx, {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT: retl
;
; X64-NOBMI2-LABEL: clear_highbits32_c1_indexzext:
@@ -385,8 +388,9 @@ define i32 @clear_highbits32_c1_indexzext(i32 %val, i8 %numhighbits) nounwind {
;
; X64-BMI2-LABEL: clear_highbits32_c1_indexzext:
; X64-BMI2: # %bb.0:
-; X64-BMI2-NEXT: shlxl %esi, %edi, %eax
-; X64-BMI2-NEXT: shrxl %esi, %eax, %eax
+; X64-BMI2-NEXT: movl $32, %eax
+; X64-BMI2-NEXT: subl %esi, %eax
+; X64-BMI2-NEXT: bzhil %eax, %edi, %eax
; X64-BMI2-NEXT: retq
%sh_prom = zext i8 %numhighbits to i32
%mask = lshr i32 -1, %sh_prom
@@ -408,8 +412,9 @@ define i32 @clear_highbits32_c2_load(i32* %w, i32 %numhighbits) nounwind {
; X86-BMI2: # %bb.0:
; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
-; X86-BMI2-NEXT: shlxl %ecx, (%eax), %eax
-; X86-BMI2-NEXT: shrxl %ecx, %eax, %eax
+; X86-BMI2-NEXT: movl $32, %edx
+; X86-BMI2-NEXT: subl %ecx, %edx
+; X86-BMI2-NEXT: bzhil %edx, (%eax), %eax
; X86-BMI2-NEXT: retl
;
; X64-NOBMI2-LABEL: clear_highbits32_c2_load:
@@ -423,8 +428,9 @@ define i32 @clear_highbits32_c2_load(i32* %w, i32 %numhighbits) nounwind {
;
; X64-BMI2-LABEL: clear_highbits32_c2_load:
; X64-BMI2: # %bb.0:
-; X64-BMI2-NEXT: shlxl %esi, (%rdi), %eax
-; X64-BMI2-NEXT: shrxl %esi, %eax, %eax
+; X64-BMI2-NEXT: movl $32, %eax
+; X64-BMI2-NEXT: subl %esi, %eax
+; X64-BMI2-NEXT: bzhil %eax, (%rdi), %eax
; X64-BMI2-NEXT: retq
%val = load i32, i32* %w
%mask = lshr i32 -1, %numhighbits
@@ -446,8 +452,9 @@ define i32 @clear_highbits32_c3_load_indexzext(i32* %w, i8 %numhighbits) nounwin
; X86-BMI2: # %bb.0:
; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %cl
-; X86-BMI2-NEXT: shlxl %ecx, (%eax), %eax
-; X86-BMI2-NEXT: shrxl %ecx, %eax, %eax
+; X86-BMI2-NEXT: movl $32, %edx
+; X86-BMI2-NEXT: subl %ecx, %edx
+; X86-BMI2-NEXT: bzhil %edx, (%eax), %eax
; X86-BMI2-NEXT: retl
;
; X64-NOBMI2-LABEL: clear_highbits32_c3_load_indexzext:
@@ -461,8 +468,9 @@ define i32 @clear_highbits32_c3_load_indexzext(i32* %w, i8 %numhighbits) nounwin
;
; X64-BMI2-LABEL: clear_highbits32_c3_load_indexzext:
; X64-BMI2: # %bb.0:
-; X64-BMI2-NEXT: shlxl %esi, (%rdi), %eax
-; X64-BMI2-NEXT: shrxl %esi, %eax, %eax
+; X64-BMI2-NEXT: movl $32, %eax
+; X64-BMI2-NEXT: subl %esi, %eax
+; X64-BMI2-NEXT: bzhil %eax, (%rdi), %eax
; X64-BMI2-NEXT: retq
%val = load i32, i32* %w
%sh_prom = zext i8 %numhighbits to i32
@@ -483,8 +491,9 @@ define i32 @clear_highbits32_c4_commutative(i32 %val, i32 %numhighbits) nounwind
; X86-BMI2-LABEL: clear_highbits32_c4_commutative:
; X86-BMI2: # %bb.0:
; X86-BMI2-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-BMI2-NEXT: shlxl %eax, {{[0-9]+}}(%esp), %ecx
-; X86-BMI2-NEXT: shrxl %eax, %ecx, %eax
+; X86-BMI2-NEXT: movl $32, %ecx
+; X86-BMI2-NEXT: subl %eax, %ecx
+; X86-BMI2-NEXT: bzhil %ecx, {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT: retl
;
; X64-NOBMI2-LABEL: clear_highbits32_c4_commutative:
@@ -498,8 +507,9 @@ define i32 @clear_highbits32_c4_commutative(i32 %val, i32 %numhighbits) nounwind
;
; X64-BMI2-LABEL: clear_highbits32_c4_commutative:
; X64-BMI2: # %bb.0:
-; X64-BMI2-NEXT: shlxl %esi, %edi, %eax
-; X64-BMI2-NEXT: shrxl %esi, %eax, %eax
+; X64-BMI2-NEXT: movl $32, %eax
+; X64-BMI2-NEXT: subl %esi, %eax
+; X64-BMI2-NEXT: bzhil %eax, %edi, %eax
; X64-BMI2-NEXT: retq
%mask = lshr i32 -1, %numhighbits
%masked = and i32 %val, %mask ; swapped order
@@ -574,8 +584,9 @@ define i64 @clear_highbits64_c0(i64 %val, i64 %numhighbits) nounwind {
;
; X64-BMI2-LABEL: clear_highbits64_c0:
; X64-BMI2: # %bb.0:
-; X64-BMI2-NEXT: shlxq %rsi, %rdi, %rax
-; X64-BMI2-NEXT: shrxq %rsi, %rax, %rax
+; X64-BMI2-NEXT: movl $64, %eax
+; X64-BMI2-NEXT: subl %esi, %eax
+; X64-BMI2-NEXT: bzhiq %rax, %rdi, %rax
; X64-BMI2-NEXT: retq
%mask = lshr i64 -1, %numhighbits
%masked = and i64 %mask, %val
@@ -646,9 +657,9 @@ define i64 @clear_highbits64_c1_indexzext(i64 %val, i8 %numhighbits) nounwind {
;
; X64-BMI2-LABEL: clear_highbits64_c1_indexzext:
; X64-BMI2: # %bb.0:
-; X64-BMI2-NEXT: # kill: def $esi killed $esi def $rsi
-; X64-BMI2-NEXT: shlxq %rsi, %rdi, %rax
-; X64-BMI2-NEXT: shrxq %rsi, %rax, %rax
+; X64-BMI2-NEXT: movl $64, %eax
+; X64-BMI2-NEXT: subl %esi, %eax
+; X64-BMI2-NEXT: bzhiq %rax, %rdi, %rax
; X64-BMI2-NEXT: retq
%sh_prom = zext i8 %numhighbits to i64
%mask = lshr i64 -1, %sh_prom
@@ -729,8 +740,9 @@ define i64 @clear_highbits64_c2_load(i64* %w, i64 %numhighbits) nounwind {
;
; X64-BMI2-LABEL: clear_highbits64_c2_load:
; X64-BMI2: # %bb.0:
-; X64-BMI2-NEXT: shlxq %rsi, (%rdi), %rax
-; X64-BMI2-NEXT: shrxq %rsi, %rax, %rax
+; X64-BMI2-NEXT: movl $64, %eax
+; X64-BMI2-NEXT: subl %esi, %eax
+; X64-BMI2-NEXT: bzhiq %rax, (%rdi), %rax
; X64-BMI2-NEXT: retq
%val = load i64, i64* %w
%mask = lshr i64 -1, %numhighbits
@@ -811,9 +823,9 @@ define i64 @clear_highbits64_c3_load_indexzext(i64* %w, i8 %numhighbits) nounwin
;
; X64-BMI2-LABEL: clear_highbits64_c3_load_indexzext:
; X64-BMI2: # %bb.0:
-; X64-BMI2-NEXT: # kill: def $esi killed $esi def $rsi
-; X64-BMI2-NEXT: shlxq %rsi, (%rdi), %rax
-; X64-BMI2-NEXT: shrxq %rsi, %rax, %rax
+; X64-BMI2-NEXT: movl $64, %eax
+; X64-BMI2-NEXT: subl %esi, %eax
+; X64-BMI2-NEXT: bzhiq %rax, (%rdi), %rax
; X64-BMI2-NEXT: retq
%val = load i64, i64* %w
%sh_prom = zext i8 %numhighbits to i64
@@ -886,8 +898,9 @@ define i64 @clear_highbits64_c4_commutative(i64 %val, i64 %numhighbits) nounwind
;
; X64-BMI2-LABEL: clear_highbits64_c4_commutative:
; X64-BMI2: # %bb.0:
-; X64-BMI2-NEXT: shlxq %rsi, %rdi, %rax
-; X64-BMI2-NEXT: shrxq %rsi, %rax, %rax
+; X64-BMI2-NEXT: movl $64, %eax
+; X64-BMI2-NEXT: subl %esi, %eax
+; X64-BMI2-NEXT: bzhiq %rax, %rdi, %rax
; X64-BMI2-NEXT: retq
%mask = lshr i64 -1, %numhighbits
%masked = and i64 %val, %mask ; swapped order
@@ -1217,8 +1230,9 @@ define i32 @clear_highbits32_16(i32 %val, i32 %numlowbits) nounwind {
; X86-BMI2: # %bb.0:
; X86-BMI2-NEXT: movb $16, %al
; X86-BMI2-NEXT: subb {{[0-9]+}}(%esp), %al
-; X86-BMI2-NEXT: shlxl %eax, {{[0-9]+}}(%esp), %ecx
-; X86-BMI2-NEXT: shrxl %eax, %ecx, %eax
+; X86-BMI2-NEXT: movl $32, %ecx
+; X86-BMI2-NEXT: subl %eax, %ecx
+; X86-BMI2-NEXT: bzhil %ecx, {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT: retl
;
; X64-NOBMI2-LABEL: clear_highbits32_16:
@@ -1234,8 +1248,9 @@ define i32 @clear_highbits32_16(i32 %val, i32 %numlowbits) nounwind {
; X64-BMI2: # %bb.0:
; X64-BMI2-NEXT: movb $16, %al
; X64-BMI2-NEXT: subb %sil, %al
-; X64-BMI2-NEXT: shlxl %eax, %edi, %ecx
-; X64-BMI2-NEXT: shrxl %eax, %ecx, %eax
+; X64-BMI2-NEXT: movl $32, %ecx
+; X64-BMI2-NEXT: subl %eax, %ecx
+; X64-BMI2-NEXT: bzhil %ecx, %edi, %eax
; X64-BMI2-NEXT: retq
%numhighbits = sub i32 16, %numlowbits
%mask = lshr i32 -1, %numhighbits
@@ -1256,8 +1271,9 @@ define i32 @clear_highbits32_48(i32 %val, i32 %numlowbits) nounwind {
; X86-BMI2: # %bb.0:
; X86-BMI2-NEXT: movb $48, %al
; X86-BMI2-NEXT: subb {{[0-9]+}}(%esp), %al
-; X86-BMI2-NEXT: shlxl %eax, {{[0-9]+}}(%esp), %ecx
-; X86-BMI2-NEXT: shrxl %eax, %ecx, %eax
+; X86-BMI2-NEXT: movl $32, %ecx
+; X86-BMI2-NEXT: subl %eax, %ecx
+; X86-BMI2-NEXT: bzhil %ecx, {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT: retl
;
; X64-NOBMI2-LABEL: clear_highbits32_48:
@@ -1273,8 +1289,9 @@ define i32 @clear_highbits32_48(i32 %val, i32 %numlowbits) nounwind {
; X64-BMI2: # %bb.0:
; X64-BMI2-NEXT: movb $48, %al
; X64-BMI2-NEXT: subb %sil, %al
-; X64-BMI2-NEXT: shlxl %eax, %edi, %ecx
-; X64-BMI2-NEXT: shrxl %eax, %ecx, %eax
+; X64-BMI2-NEXT: movl $32, %ecx
+; X64-BMI2-NEXT: subl %eax, %ecx
+; X64-BMI2-NEXT: bzhil %ecx, %edi, %eax
; X64-BMI2-NEXT: retq
%numhighbits = sub i32 48, %numlowbits
%mask = lshr i32 -1, %numhighbits
More information about the llvm-commits
mailing list