[llvm] r334124 - [NFC][X86][AArch64] Reorganize/cleanup BZHI test patterns

Roman Lebedev via llvm-commits llvm-commits at lists.llvm.org
Wed Jun 6 12:38:10 PDT 2018


Author: lebedevri
Date: Wed Jun  6 12:38:10 2018
New Revision: 334124

URL: http://llvm.org/viewvc/llvm-project?rev=334124&view=rev
Log:
[NFC][X86][AArch64] Reorganize/cleanup BZHI test patterns

Summary:
In D47428, I propose choosing `~(-(1 << nbits))` as the canonical form of low-bit-mask formation.
As these tests show, there is a reason for that.

AArch64 currently handles `~(-(1 << nbits))` better than the more traditional `(1 << nbits) - 1` (sic!);
for X86 it is the other way around.
It would be much better to canonicalize on a single form.
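
For reference, here is a minimal C sketch (not part of the commit; the helper names are made up,
and a 32-bit unsigned int is assumed) of the four equivalent mask-formation patterns the new tests
exercise. Unsigned arithmetic keeps the shifts well-defined for 0 < nbits < 32:

    /* All four keep only the low `nbits` bits of `val`, for 0 < nbits < 32. */
    unsigned bzhi_a(unsigned val, unsigned nbits) {
      return val & ((1u << nbits) - 1);             /* pattern a */
    }
    unsigned bzhi_b(unsigned val, unsigned nbits) {
      return val & ~(~0u << nbits);                 /* pattern b: ~(-1 << nbits) */
    }
    unsigned bzhi_c(unsigned val, unsigned nbits) {
      return val & (~0u >> (32 - nbits));           /* pattern c */
    }
    unsigned bzhi_d(unsigned val, unsigned nbits) {
      return (val << (32 - nbits)) >> (32 - nbits); /* pattern d */
    }

As the AArch64 checks below show, pattern b currently lowers to three instructions (mov/lsl/bic),
while pattern a needs a fourth for the subtract; on X86 with BMI2, pattern a already becomes a
single bzhil (see the X64-BMI1BMI2 checks).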

It may seem that there are too many tests, but they are mostly the auto-generated variants of the
C code from which one would expect BZHI to be formed, manually cleaned up a bit.
So this should be fairly representative, giving reasonably good coverage.

Related links:
https://bugs.llvm.org/show_bug.cgi?id=36419
https://bugs.llvm.org/show_bug.cgi?id=37603
https://bugs.llvm.org/show_bug.cgi?id=37610
https://rise4fun.com/Alive/idM

Reviewers: javed.absar, craig.topper, RKSimon, spatel

Reviewed By: RKSimon

Subscribers: kristof.beyls, llvm-commits, RKSimon, craig.topper, spatel

Differential Revision: https://reviews.llvm.org/D47452

Added:
    llvm/trunk/test/CodeGen/AArch64/extract-lowbits.ll
    llvm/trunk/test/CodeGen/X86/extract-lowbits.ll
Modified:
    llvm/trunk/test/CodeGen/X86/bmi.ll

Added: llvm/trunk/test/CodeGen/AArch64/extract-lowbits.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/extract-lowbits.ll?rev=334124&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/extract-lowbits.ll (added)
+++ llvm/trunk/test/CodeGen/AArch64/extract-lowbits.ll Wed Jun  6 12:38:10 2018
@@ -0,0 +1,761 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s
+
+; *Please* keep in sync with test/CodeGen/X86/extract-lowbits.ll
+
+; https://bugs.llvm.org/show_bug.cgi?id=36419
+; https://bugs.llvm.org/show_bug.cgi?id=37603
+; https://bugs.llvm.org/show_bug.cgi?id=37610
+
+; Patterns:
+;   a) x &  (1 << nbits) - 1
+;   b) x & ~(-1 << nbits)
+;   c) x &  (-1 >> (32 - y))
+;   d) x << (32 - y) >> (32 - y)
+; are equivalent.
+
+; ---------------------------------------------------------------------------- ;
+; Pattern a. 32-bit
+; ---------------------------------------------------------------------------- ;
+
+define i32 @bzhi32_a0(i32 %val, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_a0:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr w8, wzr, #0x1
+; CHECK-NEXT:    lsl w8, w8, w1
+; CHECK-NEXT:    sub w8, w8, #1 // =1
+; CHECK-NEXT:    and w0, w8, w0
+; CHECK-NEXT:    ret
+  %onebit = shl i32 1, %numlowbits
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_a1_indexzext(i32 %val, i8 zeroext %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_a1_indexzext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr w8, wzr, #0x1
+; CHECK-NEXT:    lsl w8, w8, w1
+; CHECK-NEXT:    sub w8, w8, #1 // =1
+; CHECK-NEXT:    and w0, w8, w0
+; CHECK-NEXT:    ret
+  %conv = zext i8 %numlowbits to i32
+  %onebit = shl i32 1, %conv
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_a2_load(i32* %w, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_a2_load:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    orr w9, wzr, #0x1
+; CHECK-NEXT:    lsl w9, w9, w1
+; CHECK-NEXT:    sub w9, w9, #1 // =1
+; CHECK-NEXT:    and w0, w9, w8
+; CHECK-NEXT:    ret
+  %val = load i32, i32* %w
+  %onebit = shl i32 1, %numlowbits
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_a3_load_indexzext(i32* %w, i8 zeroext %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_a3_load_indexzext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    orr w9, wzr, #0x1
+; CHECK-NEXT:    lsl w9, w9, w1
+; CHECK-NEXT:    sub w9, w9, #1 // =1
+; CHECK-NEXT:    and w0, w9, w8
+; CHECK-NEXT:    ret
+  %val = load i32, i32* %w
+  %conv = zext i8 %numlowbits to i32
+  %onebit = shl i32 1, %conv
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_a4_commutative(i32 %val, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_a4_commutative:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr w8, wzr, #0x1
+; CHECK-NEXT:    lsl w8, w8, w1
+; CHECK-NEXT:    sub w8, w8, #1 // =1
+; CHECK-NEXT:    and w0, w0, w8
+; CHECK-NEXT:    ret
+  %onebit = shl i32 1, %numlowbits
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %val, %mask ; swapped order
+  ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @bzhi64_a0(i64 %val, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_a0:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr w8, wzr, #0x1
+; CHECK-NEXT:    lsl x8, x8, x1
+; CHECK-NEXT:    sub x8, x8, #1 // =1
+; CHECK-NEXT:    and x0, x8, x0
+; CHECK-NEXT:    ret
+  %onebit = shl i64 1, %numlowbits
+  %mask = add nsw i64 %onebit, -1
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_a1_indexzext(i64 %val, i8 zeroext %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_a1_indexzext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr w8, wzr, #0x1
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    lsl x8, x8, x1
+; CHECK-NEXT:    sub x8, x8, #1 // =1
+; CHECK-NEXT:    and x0, x8, x0
+; CHECK-NEXT:    ret
+  %conv = zext i8 %numlowbits to i64
+  %onebit = shl i64 1, %conv
+  %mask = add nsw i64 %onebit, -1
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_a2_load(i64* %w, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_a2_load:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    orr w9, wzr, #0x1
+; CHECK-NEXT:    lsl x9, x9, x1
+; CHECK-NEXT:    sub x9, x9, #1 // =1
+; CHECK-NEXT:    and x0, x9, x8
+; CHECK-NEXT:    ret
+  %val = load i64, i64* %w
+  %onebit = shl i64 1, %numlowbits
+  %mask = add nsw i64 %onebit, -1
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_a3_load_indexzext(i64* %w, i8 zeroext %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_a3_load_indexzext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    orr w9, wzr, #0x1
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    lsl x9, x9, x1
+; CHECK-NEXT:    sub x9, x9, #1 // =1
+; CHECK-NEXT:    and x0, x9, x8
+; CHECK-NEXT:    ret
+  %val = load i64, i64* %w
+  %conv = zext i8 %numlowbits to i64
+  %onebit = shl i64 1, %conv
+  %mask = add nsw i64 %onebit, -1
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_a4_commutative(i64 %val, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_a4_commutative:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr w8, wzr, #0x1
+; CHECK-NEXT:    lsl x8, x8, x1
+; CHECK-NEXT:    sub x8, x8, #1 // =1
+; CHECK-NEXT:    and x0, x0, x8
+; CHECK-NEXT:    ret
+  %onebit = shl i64 1, %numlowbits
+  %mask = add nsw i64 %onebit, -1
+  %masked = and i64 %val, %mask ; swapped order
+  ret i64 %masked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Pattern b. 32-bit
+; ---------------------------------------------------------------------------- ;
+
+define i32 @bzhi32_b0(i32 %val, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_b0:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #-1
+; CHECK-NEXT:    lsl w8, w8, w1
+; CHECK-NEXT:    bic w0, w0, w8
+; CHECK-NEXT:    ret
+  %notmask = shl i32 -1, %numlowbits
+  %mask = xor i32 %notmask, -1
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_b1_indexzext(i32 %val, i8 zeroext %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_b1_indexzext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #-1
+; CHECK-NEXT:    lsl w8, w8, w1
+; CHECK-NEXT:    bic w0, w0, w8
+; CHECK-NEXT:    ret
+  %conv = zext i8 %numlowbits to i32
+  %notmask = shl i32 -1, %conv
+  %mask = xor i32 %notmask, -1
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_b2_load(i32* %w, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_b2_load:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    mov w9, #-1
+; CHECK-NEXT:    lsl w9, w9, w1
+; CHECK-NEXT:    bic w0, w8, w9
+; CHECK-NEXT:    ret
+  %val = load i32, i32* %w
+  %notmask = shl i32 -1, %numlowbits
+  %mask = xor i32 %notmask, -1
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_b3_load_indexzext(i32* %w, i8 zeroext %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_b3_load_indexzext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    mov w9, #-1
+; CHECK-NEXT:    lsl w9, w9, w1
+; CHECK-NEXT:    bic w0, w8, w9
+; CHECK-NEXT:    ret
+  %val = load i32, i32* %w
+  %conv = zext i8 %numlowbits to i32
+  %notmask = shl i32 -1, %conv
+  %mask = xor i32 %notmask, -1
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_b4_commutative(i32 %val, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_b4_commutative:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #-1
+; CHECK-NEXT:    lsl w8, w8, w1
+; CHECK-NEXT:    bic w0, w0, w8
+; CHECK-NEXT:    ret
+  %notmask = shl i32 -1, %numlowbits
+  %mask = xor i32 %notmask, -1
+  %masked = and i32 %val, %mask ; swapped order
+  ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @bzhi64_b0(i64 %val, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_b0:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x8, #-1
+; CHECK-NEXT:    lsl x8, x8, x1
+; CHECK-NEXT:    bic x0, x0, x8
+; CHECK-NEXT:    ret
+  %notmask = shl i64 -1, %numlowbits
+  %mask = xor i64 %notmask, -1
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_b1_indexzext(i64 %val, i8 zeroext %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_b1_indexzext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x8, #-1
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    lsl x8, x8, x1
+; CHECK-NEXT:    bic x0, x0, x8
+; CHECK-NEXT:    ret
+  %conv = zext i8 %numlowbits to i64
+  %notmask = shl i64 -1, %conv
+  %mask = xor i64 %notmask, -1
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_b2_load(i64* %w, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_b2_load:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    mov x9, #-1
+; CHECK-NEXT:    lsl x9, x9, x1
+; CHECK-NEXT:    bic x0, x8, x9
+; CHECK-NEXT:    ret
+  %val = load i64, i64* %w
+  %notmask = shl i64 -1, %numlowbits
+  %mask = xor i64 %notmask, -1
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_b3_load_indexzext(i64* %w, i8 zeroext %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_b3_load_indexzext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    mov x9, #-1
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    lsl x9, x9, x1
+; CHECK-NEXT:    bic x0, x8, x9
+; CHECK-NEXT:    ret
+  %val = load i64, i64* %w
+  %conv = zext i8 %numlowbits to i64
+  %notmask = shl i64 -1, %conv
+  %mask = xor i64 %notmask, -1
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_b4_commutative(i64 %val, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_b4_commutative:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x8, #-1
+; CHECK-NEXT:    lsl x8, x8, x1
+; CHECK-NEXT:    bic x0, x0, x8
+; CHECK-NEXT:    ret
+  %notmask = shl i64 -1, %numlowbits
+  %mask = xor i64 %notmask, -1
+  %masked = and i64 %val, %mask ; swapped order
+  ret i64 %masked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Pattern c. 32-bit
+; ---------------------------------------------------------------------------- ;
+
+define i32 @bzhi32_c0(i32 %val, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_c0:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg w8, w1
+; CHECK-NEXT:    mov w9, #-1
+; CHECK-NEXT:    lsr w8, w9, w8
+; CHECK-NEXT:    and w0, w8, w0
+; CHECK-NEXT:    ret
+  %numhighbits = sub i32 32, %numlowbits
+  %mask = lshr i32 -1, %numhighbits
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_c1_indexzext(i32 %val, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_c1_indexzext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr w8, wzr, #0x20
+; CHECK-NEXT:    sub w8, w8, w1
+; CHECK-NEXT:    mov w9, #-1
+; CHECK-NEXT:    lsr w8, w9, w8
+; CHECK-NEXT:    and w0, w8, w0
+; CHECK-NEXT:    ret
+  %numhighbits = sub i8 32, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i32
+  %mask = lshr i32 -1, %sh_prom
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_c2_load(i32* %w, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_c2_load:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    neg w9, w1
+; CHECK-NEXT:    mov w10, #-1
+; CHECK-NEXT:    lsr w9, w10, w9
+; CHECK-NEXT:    and w0, w9, w8
+; CHECK-NEXT:    ret
+  %val = load i32, i32* %w
+  %numhighbits = sub i32 32, %numlowbits
+  %mask = lshr i32 -1, %numhighbits
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_c3_load_indexzext(i32* %w, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_c3_load_indexzext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    orr w9, wzr, #0x20
+; CHECK-NEXT:    sub w9, w9, w1
+; CHECK-NEXT:    mov w10, #-1
+; CHECK-NEXT:    lsr w9, w10, w9
+; CHECK-NEXT:    and w0, w9, w8
+; CHECK-NEXT:    ret
+  %val = load i32, i32* %w
+  %numhighbits = sub i8 32, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i32
+  %mask = lshr i32 -1, %sh_prom
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_c4_commutative(i32 %val, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_c4_commutative:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg w8, w1
+; CHECK-NEXT:    mov w9, #-1
+; CHECK-NEXT:    lsr w8, w9, w8
+; CHECK-NEXT:    and w0, w0, w8
+; CHECK-NEXT:    ret
+  %numhighbits = sub i32 32, %numlowbits
+  %mask = lshr i32 -1, %numhighbits
+  %masked = and i32 %val, %mask ; swapped order
+  ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @bzhi64_c0(i64 %val, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_c0:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg x8, x1
+; CHECK-NEXT:    mov x9, #-1
+; CHECK-NEXT:    lsr x8, x9, x8
+; CHECK-NEXT:    and x0, x8, x0
+; CHECK-NEXT:    ret
+  %numhighbits = sub i64 64, %numlowbits
+  %mask = lshr i64 -1, %numhighbits
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_c1_indexzext(i64 %val, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_c1_indexzext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr w8, wzr, #0x40
+; CHECK-NEXT:    sub w8, w8, w1
+; CHECK-NEXT:    mov x9, #-1
+; CHECK-NEXT:    lsr x8, x9, x8
+; CHECK-NEXT:    and x0, x8, x0
+; CHECK-NEXT:    ret
+  %numhighbits = sub i8 64, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i64
+  %mask = lshr i64 -1, %sh_prom
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_c2_load(i64* %w, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_c2_load:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    neg x9, x1
+; CHECK-NEXT:    mov x10, #-1
+; CHECK-NEXT:    lsr x9, x10, x9
+; CHECK-NEXT:    and x0, x9, x8
+; CHECK-NEXT:    ret
+  %val = load i64, i64* %w
+  %numhighbits = sub i64 64, %numlowbits
+  %mask = lshr i64 -1, %numhighbits
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_c3_load_indexzext(i64* %w, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_c3_load_indexzext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    orr w9, wzr, #0x40
+; CHECK-NEXT:    sub w9, w9, w1
+; CHECK-NEXT:    mov x10, #-1
+; CHECK-NEXT:    lsr x9, x10, x9
+; CHECK-NEXT:    and x0, x9, x8
+; CHECK-NEXT:    ret
+  %val = load i64, i64* %w
+  %numhighbits = sub i8 64, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i64
+  %mask = lshr i64 -1, %sh_prom
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_c4_commutative(i64 %val, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_c4_commutative:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg x8, x1
+; CHECK-NEXT:    mov x9, #-1
+; CHECK-NEXT:    lsr x8, x9, x8
+; CHECK-NEXT:    and x0, x0, x8
+; CHECK-NEXT:    ret
+  %numhighbits = sub i64 64, %numlowbits
+  %mask = lshr i64 -1, %numhighbits
+  %masked = and i64 %val, %mask ; swapped order
+  ret i64 %masked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Pattern d. 32-bit.
+; ---------------------------------------------------------------------------- ;
+
+define i32 @bzhi32_d0(i32 %val, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_d0:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg w8, w1
+; CHECK-NEXT:    lsl w9, w0, w8
+; CHECK-NEXT:    lsr w0, w9, w8
+; CHECK-NEXT:    ret
+  %numhighbits = sub i32 32, %numlowbits
+  %highbitscleared = shl i32 %val, %numhighbits
+  %masked = lshr i32 %highbitscleared, %numhighbits
+  ret i32 %masked
+}
+
+define i32 @bzhi32_d1_indexzext(i32 %val, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_d1_indexzext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr w8, wzr, #0x20
+; CHECK-NEXT:    sub w8, w8, w1
+; CHECK-NEXT:    lsl w9, w0, w8
+; CHECK-NEXT:    lsr w0, w9, w8
+; CHECK-NEXT:    ret
+  %numhighbits = sub i8 32, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i32
+  %highbitscleared = shl i32 %val, %sh_prom
+  %masked = lshr i32 %highbitscleared, %sh_prom
+  ret i32 %masked
+}
+
+define i32 @bzhi32_d2_load(i32* %w, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_d2_load:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    neg w9, w1
+; CHECK-NEXT:    lsl w8, w8, w9
+; CHECK-NEXT:    lsr w0, w8, w9
+; CHECK-NEXT:    ret
+  %val = load i32, i32* %w
+  %numhighbits = sub i32 32, %numlowbits
+  %highbitscleared = shl i32 %val, %numhighbits
+  %masked = lshr i32 %highbitscleared, %numhighbits
+  ret i32 %masked
+}
+
+define i32 @bzhi32_d3_load_indexzext(i32* %w, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_d3_load_indexzext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    orr w9, wzr, #0x20
+; CHECK-NEXT:    sub w9, w9, w1
+; CHECK-NEXT:    lsl w8, w8, w9
+; CHECK-NEXT:    lsr w0, w8, w9
+; CHECK-NEXT:    ret
+  %val = load i32, i32* %w
+  %numhighbits = sub i8 32, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i32
+  %highbitscleared = shl i32 %val, %sh_prom
+  %masked = lshr i32 %highbitscleared, %sh_prom
+  ret i32 %masked
+}
+
+; 64-bit.
+
+define i64 @bzhi64_d0(i64 %val, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_d0:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg x8, x1
+; CHECK-NEXT:    lsl x9, x0, x8
+; CHECK-NEXT:    lsr x0, x9, x8
+; CHECK-NEXT:    ret
+  %numhighbits = sub i64 64, %numlowbits
+  %highbitscleared = shl i64 %val, %numhighbits
+  %masked = lshr i64 %highbitscleared, %numhighbits
+  ret i64 %masked
+}
+
+define i64 @bzhi64_d1_indexzext(i64 %val, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_d1_indexzext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr w8, wzr, #0x40
+; CHECK-NEXT:    sub w8, w8, w1
+; CHECK-NEXT:    lsl x9, x0, x8
+; CHECK-NEXT:    lsr x0, x9, x8
+; CHECK-NEXT:    ret
+  %numhighbits = sub i8 64, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i64
+  %highbitscleared = shl i64 %val, %sh_prom
+  %masked = lshr i64 %highbitscleared, %sh_prom
+  ret i64 %masked
+}
+
+define i64 @bzhi64_d2_load(i64* %w, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_d2_load:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    neg x9, x1
+; CHECK-NEXT:    lsl x8, x8, x9
+; CHECK-NEXT:    lsr x0, x8, x9
+; CHECK-NEXT:    ret
+  %val = load i64, i64* %w
+  %numhighbits = sub i64 64, %numlowbits
+  %highbitscleared = shl i64 %val, %numhighbits
+  %masked = lshr i64 %highbitscleared, %numhighbits
+  ret i64 %masked
+}
+
+define i64 @bzhi64_d3_load_indexzext(i64* %w, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_d3_load_indexzext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    orr w9, wzr, #0x40
+; CHECK-NEXT:    sub w9, w9, w1
+; CHECK-NEXT:    lsl x8, x8, x9
+; CHECK-NEXT:    lsr x0, x8, x9
+; CHECK-NEXT:    ret
+  %val = load i64, i64* %w
+  %numhighbits = sub i8 64, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i64
+  %highbitscleared = shl i64 %val, %sh_prom
+  %masked = lshr i64 %highbitscleared, %sh_prom
+  ret i64 %masked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Constant mask
+; ---------------------------------------------------------------------------- ;
+
+; 32-bit
+
+define i32 @bzhi32_constant_mask32(i32 %val) nounwind {
+; CHECK-LABEL: bzhi32_constant_mask32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and w0, w0, #0x7fffffff
+; CHECK-NEXT:    ret
+  %masked = and i32 %val, 2147483647
+  ret i32 %masked
+}
+
+define i32 @bzhi32_constant_mask32_load(i32* %val) nounwind {
+; CHECK-LABEL: bzhi32_constant_mask32_load:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    and w0, w8, #0x7fffffff
+; CHECK-NEXT:    ret
+  %val1 = load i32, i32* %val
+  %masked = and i32 %val1, 2147483647
+  ret i32 %masked
+}
+
+define i32 @bzhi32_constant_mask16(i32 %val) nounwind {
+; CHECK-LABEL: bzhi32_constant_mask16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and w0, w0, #0x7fff
+; CHECK-NEXT:    ret
+  %masked = and i32 %val, 32767
+  ret i32 %masked
+}
+
+define i32 @bzhi32_constant_mask16_load(i32* %val) nounwind {
+; CHECK-LABEL: bzhi32_constant_mask16_load:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    and w0, w8, #0x7fff
+; CHECK-NEXT:    ret
+  %val1 = load i32, i32* %val
+  %masked = and i32 %val1, 32767
+  ret i32 %masked
+}
+
+define i32 @bzhi32_constant_mask8(i32 %val) nounwind {
+; CHECK-LABEL: bzhi32_constant_mask8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and w0, w0, #0x7f
+; CHECK-NEXT:    ret
+  %masked = and i32 %val, 127
+  ret i32 %masked
+}
+
+define i32 @bzhi32_constant_mask8_load(i32* %val) nounwind {
+; CHECK-LABEL: bzhi32_constant_mask8_load:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    and w0, w8, #0x7f
+; CHECK-NEXT:    ret
+  %val1 = load i32, i32* %val
+  %masked = and i32 %val1, 127
+  ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @bzhi64_constant_mask64(i64 %val) nounwind {
+; CHECK-LABEL: bzhi64_constant_mask64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and x0, x0, #0x3fffffffffffffff
+; CHECK-NEXT:    ret
+  %masked = and i64 %val, 4611686018427387903
+  ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask64_load(i64* %val) nounwind {
+; CHECK-LABEL: bzhi64_constant_mask64_load:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    and x0, x8, #0x3fffffffffffffff
+; CHECK-NEXT:    ret
+  %val1 = load i64, i64* %val
+  %masked = and i64 %val1, 4611686018427387903
+  ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask32(i64 %val) nounwind {
+; CHECK-LABEL: bzhi64_constant_mask32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and x0, x0, #0x7fffffff
+; CHECK-NEXT:    ret
+  %masked = and i64 %val, 2147483647
+  ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask32_load(i64* %val) nounwind {
+; CHECK-LABEL: bzhi64_constant_mask32_load:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    and x0, x8, #0x7fffffff
+; CHECK-NEXT:    ret
+  %val1 = load i64, i64* %val
+  %masked = and i64 %val1, 2147483647
+  ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask16(i64 %val) nounwind {
+; CHECK-LABEL: bzhi64_constant_mask16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and x0, x0, #0x7fff
+; CHECK-NEXT:    ret
+  %masked = and i64 %val, 32767
+  ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask16_load(i64* %val) nounwind {
+; CHECK-LABEL: bzhi64_constant_mask16_load:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    and x0, x8, #0x7fff
+; CHECK-NEXT:    ret
+  %val1 = load i64, i64* %val
+  %masked = and i64 %val1, 32767
+  ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask8(i64 %val) nounwind {
+; CHECK-LABEL: bzhi64_constant_mask8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and x0, x0, #0x7f
+; CHECK-NEXT:    ret
+  %masked = and i64 %val, 127
+  ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask8_load(i64* %val) nounwind {
+; CHECK-LABEL: bzhi64_constant_mask8_load:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    and x0, x8, #0x7f
+; CHECK-NEXT:    ret
+  %val1 = load i64, i64* %val
+  %masked = and i64 %val1, 127
+  ret i64 %masked
+}

Modified: llvm/trunk/test/CodeGen/X86/bmi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/bmi.ll?rev=334124&r1=334123&r2=334124&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/bmi.ll (original)
+++ llvm/trunk/test/CodeGen/X86/bmi.ll Wed Jun  6 12:38:10 2018
@@ -431,659 +431,6 @@ entry:
   ret i32 %and
 }
 
-define i32 @bzhi32b(i32 %x, i8 zeroext %index) {
-; X86-BMI1-LABEL: bzhi32b:
-; X86-BMI1:       # %bb.0: # %entry
-; X86-BMI1-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-BMI1-NEXT:    movl $1, %eax
-; X86-BMI1-NEXT:    shll %cl, %eax
-; X86-BMI1-NEXT:    decl %eax
-; X86-BMI1-NEXT:    andl {{[0-9]+}}(%esp), %eax
-; X86-BMI1-NEXT:    retl
-;
-; X86-BMI2-LABEL: bzhi32b:
-; X86-BMI2:       # %bb.0: # %entry
-; X86-BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X86-BMI2-NEXT:    bzhil %eax, {{[0-9]+}}(%esp), %eax
-; X86-BMI2-NEXT:    retl
-;
-; X64-BMI1-LABEL: bzhi32b:
-; X64-BMI1:       # %bb.0: # %entry
-; X64-BMI1-NEXT:    movl $1, %eax
-; X64-BMI1-NEXT:    movl %esi, %ecx
-; X64-BMI1-NEXT:    shll %cl, %eax
-; X64-BMI1-NEXT:    decl %eax
-; X64-BMI1-NEXT:    andl %edi, %eax
-; X64-BMI1-NEXT:    retq
-;
-; X64-BMI2-LABEL: bzhi32b:
-; X64-BMI2:       # %bb.0: # %entry
-; X64-BMI2-NEXT:    bzhil %esi, %edi, %eax
-; X64-BMI2-NEXT:    retq
-entry:
-  %conv = zext i8 %index to i32
-  %shl = shl i32 1, %conv
-  %sub = add nsw i32 %shl, -1
-  %and = and i32 %sub, %x
-  ret i32 %and
-}
-
-define i32 @bzhi32b_load(i32* %w, i8 zeroext %index) {
-; X86-BMI1-LABEL: bzhi32b_load:
-; X86-BMI1:       # %bb.0: # %entry
-; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-BMI1-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-BMI1-NEXT:    movl $1, %eax
-; X86-BMI1-NEXT:    shll %cl, %eax
-; X86-BMI1-NEXT:    decl %eax
-; X86-BMI1-NEXT:    andl (%edx), %eax
-; X86-BMI1-NEXT:    retl
-;
-; X86-BMI2-LABEL: bzhi32b_load:
-; X86-BMI2:       # %bb.0: # %entry
-; X86-BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-BMI2-NEXT:    bzhil %ecx, (%eax), %eax
-; X86-BMI2-NEXT:    retl
-;
-; X64-BMI1-LABEL: bzhi32b_load:
-; X64-BMI1:       # %bb.0: # %entry
-; X64-BMI1-NEXT:    movl $1, %eax
-; X64-BMI1-NEXT:    movl %esi, %ecx
-; X64-BMI1-NEXT:    shll %cl, %eax
-; X64-BMI1-NEXT:    decl %eax
-; X64-BMI1-NEXT:    andl (%rdi), %eax
-; X64-BMI1-NEXT:    retq
-;
-; X64-BMI2-LABEL: bzhi32b_load:
-; X64-BMI2:       # %bb.0: # %entry
-; X64-BMI2-NEXT:    bzhil %esi, (%rdi), %eax
-; X64-BMI2-NEXT:    retq
-entry:
-  %x = load i32, i32* %w
-  %conv = zext i8 %index to i32
-  %shl = shl i32 1, %conv
-  %sub = add nsw i32 %shl, -1
-  %and = and i32 %sub, %x
-  ret i32 %and
-}
-
-define i32 @bzhi32c(i32 %x, i8 zeroext %index) {
-; X86-BMI1-LABEL: bzhi32c:
-; X86-BMI1:       # %bb.0: # %entry
-; X86-BMI1-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-BMI1-NEXT:    movl $1, %eax
-; X86-BMI1-NEXT:    shll %cl, %eax
-; X86-BMI1-NEXT:    decl %eax
-; X86-BMI1-NEXT:    andl {{[0-9]+}}(%esp), %eax
-; X86-BMI1-NEXT:    retl
-;
-; X86-BMI2-LABEL: bzhi32c:
-; X86-BMI2:       # %bb.0: # %entry
-; X86-BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X86-BMI2-NEXT:    bzhil %eax, {{[0-9]+}}(%esp), %eax
-; X86-BMI2-NEXT:    retl
-;
-; X64-BMI1-LABEL: bzhi32c:
-; X64-BMI1:       # %bb.0: # %entry
-; X64-BMI1-NEXT:    movl $1, %eax
-; X64-BMI1-NEXT:    movl %esi, %ecx
-; X64-BMI1-NEXT:    shll %cl, %eax
-; X64-BMI1-NEXT:    decl %eax
-; X64-BMI1-NEXT:    andl %edi, %eax
-; X64-BMI1-NEXT:    retq
-;
-; X64-BMI2-LABEL: bzhi32c:
-; X64-BMI2:       # %bb.0: # %entry
-; X64-BMI2-NEXT:    bzhil %esi, %edi, %eax
-; X64-BMI2-NEXT:    retq
-entry:
-  %conv = zext i8 %index to i32
-  %shl = shl i32 1, %conv
-  %sub = add nsw i32 %shl, -1
-  %and = and i32 %x, %sub
-  ret i32 %and
-}
-
-define i32 @bzhi32d(i32 %a, i32 %b) {
-; X86-BMI1-LABEL: bzhi32d:
-; X86-BMI1:       # %bb.0: # %entry
-; X86-BMI1-NEXT:    movl $32, %ecx
-; X86-BMI1-NEXT:    subl {{[0-9]+}}(%esp), %ecx
-; X86-BMI1-NEXT:    movl $-1, %eax
-; X86-BMI1-NEXT:    # kill: def $cl killed $cl killed $ecx
-; X86-BMI1-NEXT:    shrl %cl, %eax
-; X86-BMI1-NEXT:    andl {{[0-9]+}}(%esp), %eax
-; X86-BMI1-NEXT:    retl
-;
-; X86-BMI2-LABEL: bzhi32d:
-; X86-BMI2:       # %bb.0: # %entry
-; X86-BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-BMI2-NEXT:    bzhil %eax, {{[0-9]+}}(%esp), %eax
-; X86-BMI2-NEXT:    retl
-;
-; X64-BMI1-LABEL: bzhi32d:
-; X64-BMI1:       # %bb.0: # %entry
-; X64-BMI1-NEXT:    movl $32, %ecx
-; X64-BMI1-NEXT:    subl %esi, %ecx
-; X64-BMI1-NEXT:    movl $-1, %eax
-; X64-BMI1-NEXT:    # kill: def $cl killed $cl killed $ecx
-; X64-BMI1-NEXT:    shrl %cl, %eax
-; X64-BMI1-NEXT:    andl %edi, %eax
-; X64-BMI1-NEXT:    retq
-;
-; X64-BMI2-LABEL: bzhi32d:
-; X64-BMI2:       # %bb.0: # %entry
-; X64-BMI2-NEXT:    bzhil %esi, %edi, %eax
-; X64-BMI2-NEXT:    retq
-entry:
-  %sub = sub i32 32, %b
-  %shr = lshr i32 -1, %sub
-  %and = and i32 %shr, %a
-  ret i32 %and
-}
-
-define i32 @bzhi32e(i32 %a, i32 %b) {
-; X86-BMI1-LABEL: bzhi32e:
-; X86-BMI1:       # %bb.0: # %entry
-; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-BMI1-NEXT:    movl $32, %ecx
-; X86-BMI1-NEXT:    subl {{[0-9]+}}(%esp), %ecx
-; X86-BMI1-NEXT:    shll %cl, %eax
-; X86-BMI1-NEXT:    # kill: def $cl killed $cl killed $ecx
-; X86-BMI1-NEXT:    shrl %cl, %eax
-; X86-BMI1-NEXT:    retl
-;
-; X86-BMI2-LABEL: bzhi32e:
-; X86-BMI2:       # %bb.0: # %entry
-; X86-BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-BMI2-NEXT:    bzhil %eax, {{[0-9]+}}(%esp), %eax
-; X86-BMI2-NEXT:    retl
-;
-; X64-BMI1-LABEL: bzhi32e:
-; X64-BMI1:       # %bb.0: # %entry
-; X64-BMI1-NEXT:    movl $32, %ecx
-; X64-BMI1-NEXT:    subl %esi, %ecx
-; X64-BMI1-NEXT:    shll %cl, %edi
-; X64-BMI1-NEXT:    # kill: def $cl killed $cl killed $ecx
-; X64-BMI1-NEXT:    shrl %cl, %edi
-; X64-BMI1-NEXT:    movl %edi, %eax
-; X64-BMI1-NEXT:    retq
-;
-; X64-BMI2-LABEL: bzhi32e:
-; X64-BMI2:       # %bb.0: # %entry
-; X64-BMI2-NEXT:    bzhil %esi, %edi, %eax
-; X64-BMI2-NEXT:    retq
-entry:
-  %sub = sub i32 32, %b
-  %shl = shl i32 %a, %sub
-  %shr = lshr i32 %shl, %sub
-  ret i32 %shr
-}
-
-define i64 @bzhi64b(i64 %x, i8 zeroext %index) {
-; X86-BMI1-LABEL: bzhi64b:
-; X86-BMI1:       # %bb.0: # %entry
-; X86-BMI1-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-BMI1-NEXT:    movl $1, %eax
-; X86-BMI1-NEXT:    xorl %edx, %edx
-; X86-BMI1-NEXT:    shldl %cl, %eax, %edx
-; X86-BMI1-NEXT:    shll %cl, %eax
-; X86-BMI1-NEXT:    testb $32, %cl
-; X86-BMI1-NEXT:    je .LBB27_2
-; X86-BMI1-NEXT:  # %bb.1:
-; X86-BMI1-NEXT:    movl %eax, %edx
-; X86-BMI1-NEXT:    xorl %eax, %eax
-; X86-BMI1-NEXT:  .LBB27_2: # %entry
-; X86-BMI1-NEXT:    addl $-1, %eax
-; X86-BMI1-NEXT:    adcl $-1, %edx
-; X86-BMI1-NEXT:    andl {{[0-9]+}}(%esp), %edx
-; X86-BMI1-NEXT:    andl {{[0-9]+}}(%esp), %eax
-; X86-BMI1-NEXT:    retl
-;
-; X86-BMI2-LABEL: bzhi64b:
-; X86-BMI2:       # %bb.0: # %entry
-; X86-BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-BMI2-NEXT:    movl $1, %eax
-; X86-BMI2-NEXT:    xorl %edx, %edx
-; X86-BMI2-NEXT:    shldl %cl, %eax, %edx
-; X86-BMI2-NEXT:    shlxl %ecx, %eax, %eax
-; X86-BMI2-NEXT:    testb $32, %cl
-; X86-BMI2-NEXT:    je .LBB27_2
-; X86-BMI2-NEXT:  # %bb.1:
-; X86-BMI2-NEXT:    movl %eax, %edx
-; X86-BMI2-NEXT:    xorl %eax, %eax
-; X86-BMI2-NEXT:  .LBB27_2: # %entry
-; X86-BMI2-NEXT:    addl $-1, %eax
-; X86-BMI2-NEXT:    adcl $-1, %edx
-; X86-BMI2-NEXT:    andl {{[0-9]+}}(%esp), %edx
-; X86-BMI2-NEXT:    andl {{[0-9]+}}(%esp), %eax
-; X86-BMI2-NEXT:    retl
-;
-; X64-BMI1-LABEL: bzhi64b:
-; X64-BMI1:       # %bb.0: # %entry
-; X64-BMI1-NEXT:    movl $1, %eax
-; X64-BMI1-NEXT:    movl %esi, %ecx
-; X64-BMI1-NEXT:    shlq %cl, %rax
-; X64-BMI1-NEXT:    decq %rax
-; X64-BMI1-NEXT:    andq %rdi, %rax
-; X64-BMI1-NEXT:    retq
-;
-; X64-BMI2-LABEL: bzhi64b:
-; X64-BMI2:       # %bb.0: # %entry
-; X64-BMI2-NEXT:    # kill: def $esi killed $esi def $rsi
-; X64-BMI2-NEXT:    bzhiq %rsi, %rdi, %rax
-; X64-BMI2-NEXT:    retq
-entry:
-  %conv = zext i8 %index to i64
-  %shl = shl i64 1, %conv
-  %sub = add nsw i64 %shl, -1
-  %and = and i64 %x, %sub
-  ret i64 %and
-}
-
-define i64 @bzhi64c(i64 %a, i64 %b) {
-; X86-BMI1-LABEL: bzhi64c:
-; X86-BMI1:       # %bb.0: # %entry
-; X86-BMI1-NEXT:    movl $64, %ecx
-; X86-BMI1-NEXT:    subl {{[0-9]+}}(%esp), %ecx
-; X86-BMI1-NEXT:    movl $-1, %eax
-; X86-BMI1-NEXT:    movl $-1, %edx
-; X86-BMI1-NEXT:    shrl %cl, %edx
-; X86-BMI1-NEXT:    shrdl %cl, %eax, %eax
-; X86-BMI1-NEXT:    testb $32, %cl
-; X86-BMI1-NEXT:    je .LBB28_2
-; X86-BMI1-NEXT:  # %bb.1:
-; X86-BMI1-NEXT:    movl %edx, %eax
-; X86-BMI1-NEXT:    xorl %edx, %edx
-; X86-BMI1-NEXT:  .LBB28_2: # %entry
-; X86-BMI1-NEXT:    andl {{[0-9]+}}(%esp), %eax
-; X86-BMI1-NEXT:    andl {{[0-9]+}}(%esp), %edx
-; X86-BMI1-NEXT:    retl
-;
-; X86-BMI2-LABEL: bzhi64c:
-; X86-BMI2:       # %bb.0: # %entry
-; X86-BMI2-NEXT:    movl $64, %ecx
-; X86-BMI2-NEXT:    subl {{[0-9]+}}(%esp), %ecx
-; X86-BMI2-NEXT:    movl $-1, %eax
-; X86-BMI2-NEXT:    shrxl %ecx, %eax, %edx
-; X86-BMI2-NEXT:    shrdl %cl, %eax, %eax
-; X86-BMI2-NEXT:    testb $32, %cl
-; X86-BMI2-NEXT:    je .LBB28_2
-; X86-BMI2-NEXT:  # %bb.1:
-; X86-BMI2-NEXT:    movl %edx, %eax
-; X86-BMI2-NEXT:    xorl %edx, %edx
-; X86-BMI2-NEXT:  .LBB28_2: # %entry
-; X86-BMI2-NEXT:    andl {{[0-9]+}}(%esp), %eax
-; X86-BMI2-NEXT:    andl {{[0-9]+}}(%esp), %edx
-; X86-BMI2-NEXT:    retl
-;
-; X64-BMI1-LABEL: bzhi64c:
-; X64-BMI1:       # %bb.0: # %entry
-; X64-BMI1-NEXT:    movl $64, %ecx
-; X64-BMI1-NEXT:    subl %esi, %ecx
-; X64-BMI1-NEXT:    movq $-1, %rax
-; X64-BMI1-NEXT:    # kill: def $cl killed $cl killed $ecx
-; X64-BMI1-NEXT:    shrq %cl, %rax
-; X64-BMI1-NEXT:    andq %rdi, %rax
-; X64-BMI1-NEXT:    retq
-;
-; X64-BMI2-LABEL: bzhi64c:
-; X64-BMI2:       # %bb.0: # %entry
-; X64-BMI2-NEXT:    bzhiq %rsi, %rdi, %rax
-; X64-BMI2-NEXT:    retq
-entry:
-  %sub = sub i64 64, %b
-  %shr = lshr i64 -1, %sub
-  %and = and i64 %shr, %a
-  ret i64 %and
-}
-
-define i64 @bzhi64d(i64 %a, i32 %b) {
-; X86-BMI1-LABEL: bzhi64d:
-; X86-BMI1:       # %bb.0: # %entry
-; X86-BMI1-NEXT:    movl $64, %ecx
-; X86-BMI1-NEXT:    subl {{[0-9]+}}(%esp), %ecx
-; X86-BMI1-NEXT:    movl $-1, %eax
-; X86-BMI1-NEXT:    movl $-1, %edx
-; X86-BMI1-NEXT:    shrl %cl, %edx
-; X86-BMI1-NEXT:    shrdl %cl, %eax, %eax
-; X86-BMI1-NEXT:    testb $32, %cl
-; X86-BMI1-NEXT:    je .LBB29_2
-; X86-BMI1-NEXT:  # %bb.1:
-; X86-BMI1-NEXT:    movl %edx, %eax
-; X86-BMI1-NEXT:    xorl %edx, %edx
-; X86-BMI1-NEXT:  .LBB29_2: # %entry
-; X86-BMI1-NEXT:    andl {{[0-9]+}}(%esp), %eax
-; X86-BMI1-NEXT:    andl {{[0-9]+}}(%esp), %edx
-; X86-BMI1-NEXT:    retl
-;
-; X86-BMI2-LABEL: bzhi64d:
-; X86-BMI2:       # %bb.0: # %entry
-; X86-BMI2-NEXT:    movl $64, %ecx
-; X86-BMI2-NEXT:    subl {{[0-9]+}}(%esp), %ecx
-; X86-BMI2-NEXT:    movl $-1, %eax
-; X86-BMI2-NEXT:    shrxl %ecx, %eax, %edx
-; X86-BMI2-NEXT:    shrdl %cl, %eax, %eax
-; X86-BMI2-NEXT:    testb $32, %cl
-; X86-BMI2-NEXT:    je .LBB29_2
-; X86-BMI2-NEXT:  # %bb.1:
-; X86-BMI2-NEXT:    movl %edx, %eax
-; X86-BMI2-NEXT:    xorl %edx, %edx
-; X86-BMI2-NEXT:  .LBB29_2: # %entry
-; X86-BMI2-NEXT:    andl {{[0-9]+}}(%esp), %eax
-; X86-BMI2-NEXT:    andl {{[0-9]+}}(%esp), %edx
-; X86-BMI2-NEXT:    retl
-;
-; X64-BMI1-LABEL: bzhi64d:
-; X64-BMI1:       # %bb.0: # %entry
-; X64-BMI1-NEXT:    movl $64, %ecx
-; X64-BMI1-NEXT:    subl %esi, %ecx
-; X64-BMI1-NEXT:    movq $-1, %rax
-; X64-BMI1-NEXT:    # kill: def $cl killed $cl killed $ecx
-; X64-BMI1-NEXT:    shrq %cl, %rax
-; X64-BMI1-NEXT:    andq %rdi, %rax
-; X64-BMI1-NEXT:    retq
-;
-; X64-BMI2-LABEL: bzhi64d:
-; X64-BMI2:       # %bb.0: # %entry
-; X64-BMI2-NEXT:    # kill: def $esi killed $esi def $rsi
-; X64-BMI2-NEXT:    bzhiq %rsi, %rdi, %rax
-; X64-BMI2-NEXT:    retq
-entry:
-  %sub = sub i32 64, %b
-  %sh_prom = zext i32 %sub to i64
-  %shr = lshr i64 -1, %sh_prom
-  %and = and i64 %shr, %a
-  ret i64 %and
-}
-
-define i64 @bzhi64e(i64 %a, i64 %b) {
-; X86-BMI1-LABEL: bzhi64e:
-; X86-BMI1:       # %bb.0: # %entry
-; X86-BMI1-NEXT:    pushl %ebx
-; X86-BMI1-NEXT:    .cfi_def_cfa_offset 8
-; X86-BMI1-NEXT:    pushl %edi
-; X86-BMI1-NEXT:    .cfi_def_cfa_offset 12
-; X86-BMI1-NEXT:    pushl %esi
-; X86-BMI1-NEXT:    .cfi_def_cfa_offset 16
-; X86-BMI1-NEXT:    .cfi_offset %esi, -16
-; X86-BMI1-NEXT:    .cfi_offset %edi, -12
-; X86-BMI1-NEXT:    .cfi_offset %ebx, -8
-; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-BMI1-NEXT:    movl $64, %ecx
-; X86-BMI1-NEXT:    subl {{[0-9]+}}(%esp), %ecx
-; X86-BMI1-NEXT:    movl %edx, %esi
-; X86-BMI1-NEXT:    shll %cl, %esi
-; X86-BMI1-NEXT:    shldl %cl, %edx, %eax
-; X86-BMI1-NEXT:    testb $32, %cl
-; X86-BMI1-NEXT:    movl %esi, %edi
-; X86-BMI1-NEXT:    jne .LBB30_2
-; X86-BMI1-NEXT:  # %bb.1: # %entry
-; X86-BMI1-NEXT:    movl %eax, %edi
-; X86-BMI1-NEXT:  .LBB30_2: # %entry
-; X86-BMI1-NEXT:    movl %edi, %eax
-; X86-BMI1-NEXT:    shrl %cl, %eax
-; X86-BMI1-NEXT:    xorl %ebx, %ebx
-; X86-BMI1-NEXT:    testb $32, %cl
-; X86-BMI1-NEXT:    movl $0, %edx
-; X86-BMI1-NEXT:    jne .LBB30_4
-; X86-BMI1-NEXT:  # %bb.3: # %entry
-; X86-BMI1-NEXT:    movl %esi, %ebx
-; X86-BMI1-NEXT:    movl %eax, %edx
-; X86-BMI1-NEXT:  .LBB30_4: # %entry
-; X86-BMI1-NEXT:    shrdl %cl, %edi, %ebx
-; X86-BMI1-NEXT:    testb $32, %cl
-; X86-BMI1-NEXT:    jne .LBB30_6
-; X86-BMI1-NEXT:  # %bb.5: # %entry
-; X86-BMI1-NEXT:    movl %ebx, %eax
-; X86-BMI1-NEXT:  .LBB30_6: # %entry
-; X86-BMI1-NEXT:    popl %esi
-; X86-BMI1-NEXT:    .cfi_def_cfa_offset 12
-; X86-BMI1-NEXT:    popl %edi
-; X86-BMI1-NEXT:    .cfi_def_cfa_offset 8
-; X86-BMI1-NEXT:    popl %ebx
-; X86-BMI1-NEXT:    .cfi_def_cfa_offset 4
-; X86-BMI1-NEXT:    retl
-;
-; X86-BMI2-LABEL: bzhi64e:
-; X86-BMI2:       # %bb.0: # %entry
-; X86-BMI2-NEXT:    pushl %edi
-; X86-BMI2-NEXT:    .cfi_def_cfa_offset 8
-; X86-BMI2-NEXT:    pushl %esi
-; X86-BMI2-NEXT:    .cfi_def_cfa_offset 12
-; X86-BMI2-NEXT:    .cfi_offset %esi, -12
-; X86-BMI2-NEXT:    .cfi_offset %edi, -8
-; X86-BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-BMI2-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-BMI2-NEXT:    movl $64, %ecx
-; X86-BMI2-NEXT:    subl {{[0-9]+}}(%esp), %ecx
-; X86-BMI2-NEXT:    shldl %cl, %eax, %esi
-; X86-BMI2-NEXT:    shlxl %ecx, %eax, %edi
-; X86-BMI2-NEXT:    xorl %edx, %edx
-; X86-BMI2-NEXT:    testb $32, %cl
-; X86-BMI2-NEXT:    je .LBB30_2
-; X86-BMI2-NEXT:  # %bb.1:
-; X86-BMI2-NEXT:    movl %edi, %esi
-; X86-BMI2-NEXT:    movl $0, %edi
-; X86-BMI2-NEXT:  .LBB30_2: # %entry
-; X86-BMI2-NEXT:    shrxl %ecx, %esi, %eax
-; X86-BMI2-NEXT:    jne .LBB30_4
-; X86-BMI2-NEXT:  # %bb.3: # %entry
-; X86-BMI2-NEXT:    movl %eax, %edx
-; X86-BMI2-NEXT:  .LBB30_4: # %entry
-; X86-BMI2-NEXT:    shrdl %cl, %esi, %edi
-; X86-BMI2-NEXT:    testb $32, %cl
-; X86-BMI2-NEXT:    jne .LBB30_6
-; X86-BMI2-NEXT:  # %bb.5: # %entry
-; X86-BMI2-NEXT:    movl %edi, %eax
-; X86-BMI2-NEXT:  .LBB30_6: # %entry
-; X86-BMI2-NEXT:    popl %esi
-; X86-BMI2-NEXT:    .cfi_def_cfa_offset 8
-; X86-BMI2-NEXT:    popl %edi
-; X86-BMI2-NEXT:    .cfi_def_cfa_offset 4
-; X86-BMI2-NEXT:    retl
-;
-; X64-BMI1-LABEL: bzhi64e:
-; X64-BMI1:       # %bb.0: # %entry
-; X64-BMI1-NEXT:    movl $64, %ecx
-; X64-BMI1-NEXT:    subl %esi, %ecx
-; X64-BMI1-NEXT:    shlq %cl, %rdi
-; X64-BMI1-NEXT:    # kill: def $cl killed $cl killed $ecx
-; X64-BMI1-NEXT:    shrq %cl, %rdi
-; X64-BMI1-NEXT:    movq %rdi, %rax
-; X64-BMI1-NEXT:    retq
-;
-; X64-BMI2-LABEL: bzhi64e:
-; X64-BMI2:       # %bb.0: # %entry
-; X64-BMI2-NEXT:    bzhiq %rsi, %rdi, %rax
-; X64-BMI2-NEXT:    retq
-entry:
-  %sub = sub i64 64, %b
-  %shl = shl i64 %a, %sub
-  %shr = lshr i64 %shl, %sub
-  ret i64 %shr
-}
-
-define i64 @bzhi64f(i64 %a, i32 %b) {
-; X86-BMI1-LABEL: bzhi64f:
-; X86-BMI1:       # %bb.0: # %entry
-; X86-BMI1-NEXT:    pushl %ebx
-; X86-BMI1-NEXT:    .cfi_def_cfa_offset 8
-; X86-BMI1-NEXT:    pushl %edi
-; X86-BMI1-NEXT:    .cfi_def_cfa_offset 12
-; X86-BMI1-NEXT:    pushl %esi
-; X86-BMI1-NEXT:    .cfi_def_cfa_offset 16
-; X86-BMI1-NEXT:    .cfi_offset %esi, -16
-; X86-BMI1-NEXT:    .cfi_offset %edi, -12
-; X86-BMI1-NEXT:    .cfi_offset %ebx, -8
-; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-BMI1-NEXT:    movl $64, %ecx
-; X86-BMI1-NEXT:    subl {{[0-9]+}}(%esp), %ecx
-; X86-BMI1-NEXT:    movl %edx, %esi
-; X86-BMI1-NEXT:    shll %cl, %esi
-; X86-BMI1-NEXT:    shldl %cl, %edx, %eax
-; X86-BMI1-NEXT:    testb $32, %cl
-; X86-BMI1-NEXT:    movl %esi, %edi
-; X86-BMI1-NEXT:    jne .LBB31_2
-; X86-BMI1-NEXT:  # %bb.1: # %entry
-; X86-BMI1-NEXT:    movl %eax, %edi
-; X86-BMI1-NEXT:  .LBB31_2: # %entry
-; X86-BMI1-NEXT:    movl %edi, %eax
-; X86-BMI1-NEXT:    shrl %cl, %eax
-; X86-BMI1-NEXT:    xorl %ebx, %ebx
-; X86-BMI1-NEXT:    testb $32, %cl
-; X86-BMI1-NEXT:    movl $0, %edx
-; X86-BMI1-NEXT:    jne .LBB31_4
-; X86-BMI1-NEXT:  # %bb.3: # %entry
-; X86-BMI1-NEXT:    movl %esi, %ebx
-; X86-BMI1-NEXT:    movl %eax, %edx
-; X86-BMI1-NEXT:  .LBB31_4: # %entry
-; X86-BMI1-NEXT:    shrdl %cl, %edi, %ebx
-; X86-BMI1-NEXT:    testb $32, %cl
-; X86-BMI1-NEXT:    jne .LBB31_6
-; X86-BMI1-NEXT:  # %bb.5: # %entry
-; X86-BMI1-NEXT:    movl %ebx, %eax
-; X86-BMI1-NEXT:  .LBB31_6: # %entry
-; X86-BMI1-NEXT:    popl %esi
-; X86-BMI1-NEXT:    .cfi_def_cfa_offset 12
-; X86-BMI1-NEXT:    popl %edi
-; X86-BMI1-NEXT:    .cfi_def_cfa_offset 8
-; X86-BMI1-NEXT:    popl %ebx
-; X86-BMI1-NEXT:    .cfi_def_cfa_offset 4
-; X86-BMI1-NEXT:    retl
-;
-; X86-BMI2-LABEL: bzhi64f:
-; X86-BMI2:       # %bb.0: # %entry
-; X86-BMI2-NEXT:    pushl %edi
-; X86-BMI2-NEXT:    .cfi_def_cfa_offset 8
-; X86-BMI2-NEXT:    pushl %esi
-; X86-BMI2-NEXT:    .cfi_def_cfa_offset 12
-; X86-BMI2-NEXT:    .cfi_offset %esi, -12
-; X86-BMI2-NEXT:    .cfi_offset %edi, -8
-; X86-BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-BMI2-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-BMI2-NEXT:    movl $64, %ecx
-; X86-BMI2-NEXT:    subl {{[0-9]+}}(%esp), %ecx
-; X86-BMI2-NEXT:    shldl %cl, %eax, %esi
-; X86-BMI2-NEXT:    shlxl %ecx, %eax, %edi
-; X86-BMI2-NEXT:    xorl %edx, %edx
-; X86-BMI2-NEXT:    testb $32, %cl
-; X86-BMI2-NEXT:    je .LBB31_2
-; X86-BMI2-NEXT:  # %bb.1:
-; X86-BMI2-NEXT:    movl %edi, %esi
-; X86-BMI2-NEXT:    movl $0, %edi
-; X86-BMI2-NEXT:  .LBB31_2: # %entry
-; X86-BMI2-NEXT:    shrxl %ecx, %esi, %eax
-; X86-BMI2-NEXT:    jne .LBB31_4
-; X86-BMI2-NEXT:  # %bb.3: # %entry
-; X86-BMI2-NEXT:    movl %eax, %edx
-; X86-BMI2-NEXT:  .LBB31_4: # %entry
-; X86-BMI2-NEXT:    shrdl %cl, %esi, %edi
-; X86-BMI2-NEXT:    testb $32, %cl
-; X86-BMI2-NEXT:    jne .LBB31_6
-; X86-BMI2-NEXT:  # %bb.5: # %entry
-; X86-BMI2-NEXT:    movl %edi, %eax
-; X86-BMI2-NEXT:  .LBB31_6: # %entry
-; X86-BMI2-NEXT:    popl %esi
-; X86-BMI2-NEXT:    .cfi_def_cfa_offset 8
-; X86-BMI2-NEXT:    popl %edi
-; X86-BMI2-NEXT:    .cfi_def_cfa_offset 4
-; X86-BMI2-NEXT:    retl
-;
-; X64-BMI1-LABEL: bzhi64f:
-; X64-BMI1:       # %bb.0: # %entry
-; X64-BMI1-NEXT:    movl $64, %ecx
-; X64-BMI1-NEXT:    subl %esi, %ecx
-; X64-BMI1-NEXT:    shlq %cl, %rdi
-; X64-BMI1-NEXT:    # kill: def $cl killed $cl killed $ecx
-; X64-BMI1-NEXT:    shrq %cl, %rdi
-; X64-BMI1-NEXT:    movq %rdi, %rax
-; X64-BMI1-NEXT:    retq
-;
-; X64-BMI2-LABEL: bzhi64f:
-; X64-BMI2:       # %bb.0: # %entry
-; X64-BMI2-NEXT:    # kill: def $esi killed $esi def $rsi
-; X64-BMI2-NEXT:    bzhiq %rsi, %rdi, %rax
-; X64-BMI2-NEXT:    retq
-entry:
-  %sub = sub i32 64, %b
-  %sh_prom = zext i32 %sub to i64
-  %shl = shl i64 %a, %sh_prom
-  %shr = lshr i64 %shl, %sh_prom
-  ret i64 %shr
-}
-
-define i64 @bzhi64_constant_mask(i64 %x) {
-; X86-LABEL: bzhi64_constant_mask:
-; X86:       # %bb.0: # %entry
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl $1073741823, %edx # imm = 0x3FFFFFFF
-; X86-NEXT:    andl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    retl
-;
-; X64-BMI1-LABEL: bzhi64_constant_mask:
-; X64-BMI1:       # %bb.0: # %entry
-; X64-BMI1-NEXT:    movl $15872, %eax # imm = 0x3E00
-; X64-BMI1-NEXT:    bextrq %rax, %rdi, %rax
-; X64-BMI1-NEXT:    retq
-;
-; X64-BMI2-LABEL: bzhi64_constant_mask:
-; X64-BMI2:       # %bb.0: # %entry
-; X64-BMI2-NEXT:    movb $62, %al
-; X64-BMI2-NEXT:    bzhiq %rax, %rdi, %rax
-; X64-BMI2-NEXT:    retq
-entry:
-  %and = and i64 %x, 4611686018427387903
-  ret i64 %and
-}
-
-define i64 @bzhi64_constant_mask_load(i64* %x) {
-; X86-LABEL: bzhi64_constant_mask_load:
-; X86:       # %bb.0: # %entry
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl (%ecx), %eax
-; X86-NEXT:    movl $1073741823, %edx # imm = 0x3FFFFFFF
-; X86-NEXT:    andl 4(%ecx), %edx
-; X86-NEXT:    retl
-;
-; X64-BMI1-LABEL: bzhi64_constant_mask_load:
-; X64-BMI1:       # %bb.0: # %entry
-; X64-BMI1-NEXT:    movl $15872, %eax # imm = 0x3E00
-; X64-BMI1-NEXT:    bextrq %rax, (%rdi), %rax
-; X64-BMI1-NEXT:    retq
-;
-; X64-BMI2-LABEL: bzhi64_constant_mask_load:
-; X64-BMI2:       # %bb.0: # %entry
-; X64-BMI2-NEXT:    movb $62, %al
-; X64-BMI2-NEXT:    bzhiq %rax, (%rdi), %rax
-; X64-BMI2-NEXT:    retq
-entry:
-  %x1 = load i64, i64* %x
-  %and = and i64 %x1, 4611686018427387903
-  ret i64 %and
-}
-
-define i64 @bzhi64_small_constant_mask(i64 %x) {
-; X86-LABEL: bzhi64_small_constant_mask:
-; X86:       # %bb.0: # %entry
-; X86-NEXT:    movl $2147483647, %eax # imm = 0x7FFFFFFF
-; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    xorl %edx, %edx
-; X86-NEXT:    retl
-;
-; X64-LABEL: bzhi64_small_constant_mask:
-; X64:       # %bb.0: # %entry
-; X64-NEXT:    andl $2147483647, %edi # imm = 0x7FFFFFFF
-; X64-NEXT:    movq %rdi, %rax
-; X64-NEXT:    retq
-entry:
-  %and = and i64 %x, 2147483647
-  ret i64 %and
-}
-
 define i32 @blsi32(i32 %x)   {
 ; X86-LABEL: blsi32:
 ; X86:       # %bb.0:

Added: llvm/trunk/test/CodeGen/X86/extract-lowbits.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/extract-lowbits.ll?rev=334124&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/extract-lowbits.ll (added)
+++ llvm/trunk/test/CodeGen/X86/extract-lowbits.ll Wed Jun  6 12:38:10 2018
@@ -0,0 +1,2374 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X86,NOBMI,X86-NOBMI
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,-tbm,-bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X86,BMI1,X86-BMI1
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,+tbm,-bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X86,BMI1,X86-BMI1,BMI1TBM,X86-BMI1TBM
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,+tbm,+bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X86,BMI1,X86-BMI1,BMI1BMI2,X86-BMI1BMI2,BMI1TBM,X86-BMI1TBM,BMI1TBMBMI2,X86-BMI1TBMBMI2
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,-tbm,+bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X86,BMI1,X86-BMI1,BMI1BMI2,X86-BMI1BMI2,BMI1NOTBMBMI2,X86-BMI1NOTBMBMI2
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X64,NOBMI,X64-NOBMI
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,-tbm,-bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X64,BMI1,X64-BMI1
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,+tbm,-bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X64,BMI1,X64-BMI1,BMI1TBM,X64-BMI1TBM
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,+tbm,+bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X64,BMI1,X64-BMI1,BMI1BMI2,X64-BMI1BMI2,BMI1TBM,X64-BMI1TBM,BMI1TBMBMI2,X64-BMI1TBMBMI2
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,-tbm,+bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X64,BMI1,X64-BMI1,BMI1BMI2,X64-BMI1BMI2,BMI1NOTBMBMI2,X64-BMI1NOTBMBMI2
+
+; *Please* keep in sync with test/CodeGen/AArch64/extract-lowbits.ll
+
+; https://bugs.llvm.org/show_bug.cgi?id=36419
+; https://bugs.llvm.org/show_bug.cgi?id=37603
+; https://bugs.llvm.org/show_bug.cgi?id=37610
+
+; Patterns:
+;   a) x &  (1 << nbits) - 1
+;   b) x & ~(-1 << nbits)
+;   c) x &  (-1 >> (32 - y))
+;   d) x << (32 - y) >> (32 - y)
+; are equivalent.
+
+; ---------------------------------------------------------------------------- ;
+; Pattern a. 32-bit
+; ---------------------------------------------------------------------------- ;
+
+define i32 @bzhi32_a0(i32 %val, i32 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi32_a0:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl $1, %eax
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    decl %eax
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi32_a0:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-BMI1BMI2-NEXT:    bzhil %eax, {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi32_a0:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movl $1, %eax
+; X64-NOBMI-NEXT:    movl %esi, %ecx
+; X64-NOBMI-NEXT:    shll %cl, %eax
+; X64-NOBMI-NEXT:    decl %eax
+; X64-NOBMI-NEXT:    andl %edi, %eax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi32_a0:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    bzhil %esi, %edi, %eax
+; X64-BMI1BMI2-NEXT:    retq
+  %onebit = shl i32 1, %numlowbits
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_a1_indexzext(i32 %val, i8 zeroext %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi32_a1_indexzext:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl $1, %eax
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    decl %eax
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi32_a1_indexzext:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-BMI1BMI2-NEXT:    bzhil %eax, {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi32_a1_indexzext:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movl $1, %eax
+; X64-NOBMI-NEXT:    movl %esi, %ecx
+; X64-NOBMI-NEXT:    shll %cl, %eax
+; X64-NOBMI-NEXT:    decl %eax
+; X64-NOBMI-NEXT:    andl %edi, %eax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi32_a1_indexzext:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    bzhil %esi, %edi, %eax
+; X64-BMI1BMI2-NEXT:    retq
+  %conv = zext i8 %numlowbits to i32
+  %onebit = shl i32 1, %conv
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_a2_load(i32* %w, i32 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi32_a2_load:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl $1, %eax
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    decl %eax
+; X86-NOBMI-NEXT:    andl (%edx), %eax
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi32_a2_load:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    bzhil %ecx, (%eax), %eax
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi32_a2_load:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movl $1, %eax
+; X64-NOBMI-NEXT:    movl %esi, %ecx
+; X64-NOBMI-NEXT:    shll %cl, %eax
+; X64-NOBMI-NEXT:    decl %eax
+; X64-NOBMI-NEXT:    andl (%rdi), %eax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi32_a2_load:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    bzhil %esi, (%rdi), %eax
+; X64-BMI1BMI2-NEXT:    retq
+  %val = load i32, i32* %w
+  %onebit = shl i32 1, %numlowbits
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_a3_load_indexzext(i32* %w, i8 zeroext %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi32_a3_load_indexzext:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl $1, %eax
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    decl %eax
+; X86-NOBMI-NEXT:    andl (%edx), %eax
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi32_a3_load_indexzext:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    bzhil %ecx, (%eax), %eax
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi32_a3_load_indexzext:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movl $1, %eax
+; X64-NOBMI-NEXT:    movl %esi, %ecx
+; X64-NOBMI-NEXT:    shll %cl, %eax
+; X64-NOBMI-NEXT:    decl %eax
+; X64-NOBMI-NEXT:    andl (%rdi), %eax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi32_a3_load_indexzext:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    bzhil %esi, (%rdi), %eax
+; X64-BMI1BMI2-NEXT:    retq
+  %val = load i32, i32* %w
+  %conv = zext i8 %numlowbits to i32
+  %onebit = shl i32 1, %conv
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_a4_commutative(i32 %val, i32 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi32_a4_commutative:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl $1, %eax
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    decl %eax
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi32_a4_commutative:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-BMI1BMI2-NEXT:    bzhil %eax, {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi32_a4_commutative:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movl $1, %eax
+; X64-NOBMI-NEXT:    movl %esi, %ecx
+; X64-NOBMI-NEXT:    shll %cl, %eax
+; X64-NOBMI-NEXT:    decl %eax
+; X64-NOBMI-NEXT:    andl %edi, %eax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi32_a4_commutative:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    bzhil %esi, %edi, %eax
+; X64-BMI1BMI2-NEXT:    retq
+  %onebit = shl i32 1, %numlowbits
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %val, %mask ; swapped order
+  ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @bzhi64_a0(i64 %val, i64 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi64_a0:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl $1, %eax
+; X86-NOBMI-NEXT:    xorl %edx, %edx
+; X86-NOBMI-NEXT:    shldl %cl, %eax, %edx
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    je .LBB5_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %eax, %edx
+; X86-NOBMI-NEXT:    xorl %eax, %eax
+; X86-NOBMI-NEXT:  .LBB5_2:
+; X86-NOBMI-NEXT:    addl $-1, %eax
+; X86-NOBMI-NEXT:    adcl $-1, %edx
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi64_a0:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    movl $1, %eax
+; X86-BMI1BMI2-NEXT:    xorl %edx, %edx
+; X86-BMI1BMI2-NEXT:    shldl %cl, %eax, %edx
+; X86-BMI1BMI2-NEXT:    shlxl %ecx, %eax, %eax
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB5_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    movl %eax, %edx
+; X86-BMI1BMI2-NEXT:    xorl %eax, %eax
+; X86-BMI1BMI2-NEXT:  .LBB5_2:
+; X86-BMI1BMI2-NEXT:    addl $-1, %eax
+; X86-BMI1BMI2-NEXT:    adcl $-1, %edx
+; X86-BMI1BMI2-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-BMI1BMI2-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi64_a0:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movl $1, %eax
+; X64-NOBMI-NEXT:    movl %esi, %ecx
+; X64-NOBMI-NEXT:    shlq %cl, %rax
+; X64-NOBMI-NEXT:    decq %rax
+; X64-NOBMI-NEXT:    andq %rdi, %rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi64_a0:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    bzhiq %rsi, %rdi, %rax
+; X64-BMI1BMI2-NEXT:    retq
+  %onebit = shl i64 1, %numlowbits
+  %mask = add nsw i64 %onebit, -1
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_a1_indexzext(i64 %val, i8 zeroext %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi64_a1_indexzext:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl $1, %eax
+; X86-NOBMI-NEXT:    xorl %edx, %edx
+; X86-NOBMI-NEXT:    shldl %cl, %eax, %edx
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    je .LBB6_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %eax, %edx
+; X86-NOBMI-NEXT:    xorl %eax, %eax
+; X86-NOBMI-NEXT:  .LBB6_2:
+; X86-NOBMI-NEXT:    addl $-1, %eax
+; X86-NOBMI-NEXT:    adcl $-1, %edx
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi64_a1_indexzext:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    movl $1, %eax
+; X86-BMI1BMI2-NEXT:    xorl %edx, %edx
+; X86-BMI1BMI2-NEXT:    shldl %cl, %eax, %edx
+; X86-BMI1BMI2-NEXT:    shlxl %ecx, %eax, %eax
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB6_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    movl %eax, %edx
+; X86-BMI1BMI2-NEXT:    xorl %eax, %eax
+; X86-BMI1BMI2-NEXT:  .LBB6_2:
+; X86-BMI1BMI2-NEXT:    addl $-1, %eax
+; X86-BMI1BMI2-NEXT:    adcl $-1, %edx
+; X86-BMI1BMI2-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-BMI1BMI2-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi64_a1_indexzext:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movl $1, %eax
+; X64-NOBMI-NEXT:    movl %esi, %ecx
+; X64-NOBMI-NEXT:    shlq %cl, %rax
+; X64-NOBMI-NEXT:    decq %rax
+; X64-NOBMI-NEXT:    andq %rdi, %rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi64_a1_indexzext:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    # kill: def $esi killed $esi def $rsi
+; X64-BMI1BMI2-NEXT:    bzhiq %rsi, %rdi, %rax
+; X64-BMI1BMI2-NEXT:    retq
+  %conv = zext i8 %numlowbits to i64
+  %onebit = shl i64 1, %conv
+  %mask = add nsw i64 %onebit, -1
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_a2_load(i64* %w, i64 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi64_a2_load:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    pushl %esi
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl $1, %eax
+; X86-NOBMI-NEXT:    xorl %edx, %edx
+; X86-NOBMI-NEXT:    shldl %cl, %eax, %edx
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    je .LBB7_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %eax, %edx
+; X86-NOBMI-NEXT:    xorl %eax, %eax
+; X86-NOBMI-NEXT:  .LBB7_2:
+; X86-NOBMI-NEXT:    addl $-1, %eax
+; X86-NOBMI-NEXT:    adcl $-1, %edx
+; X86-NOBMI-NEXT:    andl 4(%esi), %edx
+; X86-NOBMI-NEXT:    andl (%esi), %eax
+; X86-NOBMI-NEXT:    popl %esi
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi64_a2_load:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    pushl %esi
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    movl $1, %eax
+; X86-BMI1BMI2-NEXT:    xorl %edx, %edx
+; X86-BMI1BMI2-NEXT:    shldl %cl, %eax, %edx
+; X86-BMI1BMI2-NEXT:    shlxl %ecx, %eax, %eax
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB7_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    movl %eax, %edx
+; X86-BMI1BMI2-NEXT:    xorl %eax, %eax
+; X86-BMI1BMI2-NEXT:  .LBB7_2:
+; X86-BMI1BMI2-NEXT:    addl $-1, %eax
+; X86-BMI1BMI2-NEXT:    adcl $-1, %edx
+; X86-BMI1BMI2-NEXT:    andl 4(%esi), %edx
+; X86-BMI1BMI2-NEXT:    andl (%esi), %eax
+; X86-BMI1BMI2-NEXT:    popl %esi
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi64_a2_load:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movl $1, %eax
+; X64-NOBMI-NEXT:    movl %esi, %ecx
+; X64-NOBMI-NEXT:    shlq %cl, %rax
+; X64-NOBMI-NEXT:    decq %rax
+; X64-NOBMI-NEXT:    andq (%rdi), %rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi64_a2_load:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    bzhiq %rsi, (%rdi), %rax
+; X64-BMI1BMI2-NEXT:    retq
+  %val = load i64, i64* %w
+  %onebit = shl i64 1, %numlowbits
+  %mask = add nsw i64 %onebit, -1
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_a3_load_indexzext(i64* %w, i8 zeroext %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi64_a3_load_indexzext:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    pushl %esi
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl $1, %eax
+; X86-NOBMI-NEXT:    xorl %edx, %edx
+; X86-NOBMI-NEXT:    shldl %cl, %eax, %edx
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    je .LBB8_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %eax, %edx
+; X86-NOBMI-NEXT:    xorl %eax, %eax
+; X86-NOBMI-NEXT:  .LBB8_2:
+; X86-NOBMI-NEXT:    addl $-1, %eax
+; X86-NOBMI-NEXT:    adcl $-1, %edx
+; X86-NOBMI-NEXT:    andl 4(%esi), %edx
+; X86-NOBMI-NEXT:    andl (%esi), %eax
+; X86-NOBMI-NEXT:    popl %esi
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi64_a3_load_indexzext:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    pushl %esi
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    movl $1, %eax
+; X86-BMI1BMI2-NEXT:    xorl %edx, %edx
+; X86-BMI1BMI2-NEXT:    shldl %cl, %eax, %edx
+; X86-BMI1BMI2-NEXT:    shlxl %ecx, %eax, %eax
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB8_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    movl %eax, %edx
+; X86-BMI1BMI2-NEXT:    xorl %eax, %eax
+; X86-BMI1BMI2-NEXT:  .LBB8_2:
+; X86-BMI1BMI2-NEXT:    addl $-1, %eax
+; X86-BMI1BMI2-NEXT:    adcl $-1, %edx
+; X86-BMI1BMI2-NEXT:    andl 4(%esi), %edx
+; X86-BMI1BMI2-NEXT:    andl (%esi), %eax
+; X86-BMI1BMI2-NEXT:    popl %esi
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi64_a3_load_indexzext:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movl $1, %eax
+; X64-NOBMI-NEXT:    movl %esi, %ecx
+; X64-NOBMI-NEXT:    shlq %cl, %rax
+; X64-NOBMI-NEXT:    decq %rax
+; X64-NOBMI-NEXT:    andq (%rdi), %rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi64_a3_load_indexzext:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    # kill: def $esi killed $esi def $rsi
+; X64-BMI1BMI2-NEXT:    bzhiq %rsi, (%rdi), %rax
+; X64-BMI1BMI2-NEXT:    retq
+  %val = load i64, i64* %w
+  %conv = zext i8 %numlowbits to i64
+  %onebit = shl i64 1, %conv
+  %mask = add nsw i64 %onebit, -1
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_a4_commutative(i64 %val, i64 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi64_a4_commutative:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl $1, %eax
+; X86-NOBMI-NEXT:    xorl %edx, %edx
+; X86-NOBMI-NEXT:    shldl %cl, %eax, %edx
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    je .LBB9_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %eax, %edx
+; X86-NOBMI-NEXT:    xorl %eax, %eax
+; X86-NOBMI-NEXT:  .LBB9_2:
+; X86-NOBMI-NEXT:    addl $-1, %eax
+; X86-NOBMI-NEXT:    adcl $-1, %edx
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi64_a4_commutative:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    movl $1, %eax
+; X86-BMI1BMI2-NEXT:    xorl %edx, %edx
+; X86-BMI1BMI2-NEXT:    shldl %cl, %eax, %edx
+; X86-BMI1BMI2-NEXT:    shlxl %ecx, %eax, %eax
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB9_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    movl %eax, %edx
+; X86-BMI1BMI2-NEXT:    xorl %eax, %eax
+; X86-BMI1BMI2-NEXT:  .LBB9_2:
+; X86-BMI1BMI2-NEXT:    addl $-1, %eax
+; X86-BMI1BMI2-NEXT:    adcl $-1, %edx
+; X86-BMI1BMI2-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-BMI1BMI2-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi64_a4_commutative:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movl $1, %eax
+; X64-NOBMI-NEXT:    movl %esi, %ecx
+; X64-NOBMI-NEXT:    shlq %cl, %rax
+; X64-NOBMI-NEXT:    decq %rax
+; X64-NOBMI-NEXT:    andq %rdi, %rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi64_a4_commutative:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    bzhiq %rsi, %rdi, %rax
+; X64-BMI1BMI2-NEXT:    retq
+  %onebit = shl i64 1, %numlowbits
+  %mask = add nsw i64 %onebit, -1
+  %masked = and i64 %val, %mask ; swapped order
+  ret i64 %masked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Pattern b. 32-bit
+; ---------------------------------------------------------------------------- ;
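+
+; Pattern b is roughly the following C (a sketch; the function name is
+; illustrative, <stdint.h> types, and %numlowbits < 32 is assumed):
+;
+;   uint32_t bzhi32_b(uint32_t val, uint32_t numlowbits) {
+;     return val & ~(~0U << numlowbits); // invert a not-mask of high bits
+;   }
+;
+; Note that, as of this commit, X86 does not fold this variant into BZHI:
+; the BMI output below builds the not-mask with shlx and clears it with
+; andn instead of emitting a single bzhi.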
+
+define i32 @bzhi32_b0(i32 %val, i32 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi32_b0:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl $-1, %eax
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    notl %eax
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi32_b0:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-BMI1BMI2-NEXT:    movl $-1, %ecx
+; X86-BMI1BMI2-NEXT:    shlxl %eax, %ecx, %eax
+; X86-BMI1BMI2-NEXT:    andnl {{[0-9]+}}(%esp), %eax, %eax
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi32_b0:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movl $-1, %eax
+; X64-NOBMI-NEXT:    movl %esi, %ecx
+; X64-NOBMI-NEXT:    shll %cl, %eax
+; X64-NOBMI-NEXT:    notl %eax
+; X64-NOBMI-NEXT:    andl %edi, %eax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi32_b0:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    movl $-1, %eax
+; X64-BMI1BMI2-NEXT:    shlxl %esi, %eax, %eax
+; X64-BMI1BMI2-NEXT:    andnl %edi, %eax, %eax
+; X64-BMI1BMI2-NEXT:    retq
+  %notmask = shl i32 -1, %numlowbits
+  %mask = xor i32 %notmask, -1
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_b1_indexzext(i32 %val, i8 zeroext %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi32_b1_indexzext:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl $-1, %eax
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    notl %eax
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi32_b1_indexzext:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-BMI1BMI2-NEXT:    movl $-1, %ecx
+; X86-BMI1BMI2-NEXT:    shlxl %eax, %ecx, %eax
+; X86-BMI1BMI2-NEXT:    andnl {{[0-9]+}}(%esp), %eax, %eax
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi32_b1_indexzext:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movl $-1, %eax
+; X64-NOBMI-NEXT:    movl %esi, %ecx
+; X64-NOBMI-NEXT:    shll %cl, %eax
+; X64-NOBMI-NEXT:    notl %eax
+; X64-NOBMI-NEXT:    andl %edi, %eax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi32_b1_indexzext:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    movl $-1, %eax
+; X64-BMI1BMI2-NEXT:    shlxl %esi, %eax, %eax
+; X64-BMI1BMI2-NEXT:    andnl %edi, %eax, %eax
+; X64-BMI1BMI2-NEXT:    retq
+  %conv = zext i8 %numlowbits to i32
+  %notmask = shl i32 -1, %conv
+  %mask = xor i32 %notmask, -1
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_b2_load(i32* %w, i32 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi32_b2_load:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl $-1, %eax
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    notl %eax
+; X86-NOBMI-NEXT:    andl (%edx), %eax
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi32_b2_load:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    movl $-1, %edx
+; X86-BMI1BMI2-NEXT:    shlxl %ecx, %edx, %ecx
+; X86-BMI1BMI2-NEXT:    andnl (%eax), %ecx, %eax
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi32_b2_load:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movl $-1, %eax
+; X64-NOBMI-NEXT:    movl %esi, %ecx
+; X64-NOBMI-NEXT:    shll %cl, %eax
+; X64-NOBMI-NEXT:    notl %eax
+; X64-NOBMI-NEXT:    andl (%rdi), %eax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi32_b2_load:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    movl $-1, %eax
+; X64-BMI1BMI2-NEXT:    shlxl %esi, %eax, %eax
+; X64-BMI1BMI2-NEXT:    andnl (%rdi), %eax, %eax
+; X64-BMI1BMI2-NEXT:    retq
+  %val = load i32, i32* %w
+  %notmask = shl i32 -1, %numlowbits
+  %mask = xor i32 %notmask, -1
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_b3_load_indexzext(i32* %w, i8 zeroext %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi32_b3_load_indexzext:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl $-1, %eax
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    notl %eax
+; X86-NOBMI-NEXT:    andl (%edx), %eax
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi32_b3_load_indexzext:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    movl $-1, %edx
+; X86-BMI1BMI2-NEXT:    shlxl %ecx, %edx, %ecx
+; X86-BMI1BMI2-NEXT:    andnl (%eax), %ecx, %eax
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi32_b3_load_indexzext:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movl $-1, %eax
+; X64-NOBMI-NEXT:    movl %esi, %ecx
+; X64-NOBMI-NEXT:    shll %cl, %eax
+; X64-NOBMI-NEXT:    notl %eax
+; X64-NOBMI-NEXT:    andl (%rdi), %eax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi32_b3_load_indexzext:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    movl $-1, %eax
+; X64-BMI1BMI2-NEXT:    shlxl %esi, %eax, %eax
+; X64-BMI1BMI2-NEXT:    andnl (%rdi), %eax, %eax
+; X64-BMI1BMI2-NEXT:    retq
+  %val = load i32, i32* %w
+  %conv = zext i8 %numlowbits to i32
+  %notmask = shl i32 -1, %conv
+  %mask = xor i32 %notmask, -1
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_b4_commutative(i32 %val, i32 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi32_b4_commutative:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl $-1, %eax
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    notl %eax
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi32_b4_commutative:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-BMI1BMI2-NEXT:    movl $-1, %ecx
+; X86-BMI1BMI2-NEXT:    shlxl %eax, %ecx, %eax
+; X86-BMI1BMI2-NEXT:    andnl {{[0-9]+}}(%esp), %eax, %eax
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi32_b4_commutative:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movl $-1, %eax
+; X64-NOBMI-NEXT:    movl %esi, %ecx
+; X64-NOBMI-NEXT:    shll %cl, %eax
+; X64-NOBMI-NEXT:    notl %eax
+; X64-NOBMI-NEXT:    andl %edi, %eax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi32_b4_commutative:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    movl $-1, %eax
+; X64-BMI1BMI2-NEXT:    shlxl %esi, %eax, %eax
+; X64-BMI1BMI2-NEXT:    andnl %edi, %eax, %eax
+; X64-BMI1BMI2-NEXT:    retq
+  %notmask = shl i32 -1, %numlowbits
+  %mask = xor i32 %notmask, -1
+  %masked = and i32 %val, %mask ; swapped order
+  ret i32 %masked
+}
+
+; 64-bit
+
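+; The 64-bit variant of pattern b, as a rough C sketch (%numlowbits < 64
+; assumed, name illustrative):
+;
+;   uint64_t bzhi64_b(uint64_t val, uint64_t numlowbits) {
+;     return val & ~(~0ULL << numlowbits);
+;   }
+;
+; On i686 the BMI path never materializes the inverted mask: andnl consumes
+; the not-mask halves directly, so the explicit notl pair from the no-BMI
+; path disappears.
+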
+define i64 @bzhi64_b0(i64 %val, i64 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi64_b0:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl $-1, %edx
+; X86-NOBMI-NEXT:    movl $-1, %eax
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    shldl %cl, %edx, %edx
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    je .LBB15_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %eax, %edx
+; X86-NOBMI-NEXT:    xorl %eax, %eax
+; X86-NOBMI-NEXT:  .LBB15_2:
+; X86-NOBMI-NEXT:    notl %edx
+; X86-NOBMI-NEXT:    notl %eax
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi64_b0:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    pushl %esi
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    movl $-1, %eax
+; X86-BMI1BMI2-NEXT:    shlxl %ecx, %eax, %esi
+; X86-BMI1BMI2-NEXT:    shldl %cl, %eax, %eax
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB15_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    movl %esi, %eax
+; X86-BMI1BMI2-NEXT:    xorl %esi, %esi
+; X86-BMI1BMI2-NEXT:  .LBB15_2:
+; X86-BMI1BMI2-NEXT:    andnl {{[0-9]+}}(%esp), %eax, %edx
+; X86-BMI1BMI2-NEXT:    andnl {{[0-9]+}}(%esp), %esi, %eax
+; X86-BMI1BMI2-NEXT:    popl %esi
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi64_b0:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movq $-1, %rax
+; X64-NOBMI-NEXT:    movl %esi, %ecx
+; X64-NOBMI-NEXT:    shlq %cl, %rax
+; X64-NOBMI-NEXT:    notq %rax
+; X64-NOBMI-NEXT:    andq %rdi, %rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi64_b0:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    movq $-1, %rax
+; X64-BMI1BMI2-NEXT:    shlxq %rsi, %rax, %rax
+; X64-BMI1BMI2-NEXT:    andnq %rdi, %rax, %rax
+; X64-BMI1BMI2-NEXT:    retq
+  %notmask = shl i64 -1, %numlowbits
+  %mask = xor i64 %notmask, -1
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_b1_indexzext(i64 %val, i8 zeroext %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi64_b1_indexzext:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl $-1, %edx
+; X86-NOBMI-NEXT:    movl $-1, %eax
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    shldl %cl, %edx, %edx
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    je .LBB16_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %eax, %edx
+; X86-NOBMI-NEXT:    xorl %eax, %eax
+; X86-NOBMI-NEXT:  .LBB16_2:
+; X86-NOBMI-NEXT:    notl %edx
+; X86-NOBMI-NEXT:    notl %eax
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi64_b1_indexzext:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    pushl %esi
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    movl $-1, %eax
+; X86-BMI1BMI2-NEXT:    shlxl %ecx, %eax, %esi
+; X86-BMI1BMI2-NEXT:    shldl %cl, %eax, %eax
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB16_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    movl %esi, %eax
+; X86-BMI1BMI2-NEXT:    xorl %esi, %esi
+; X86-BMI1BMI2-NEXT:  .LBB16_2:
+; X86-BMI1BMI2-NEXT:    andnl {{[0-9]+}}(%esp), %eax, %edx
+; X86-BMI1BMI2-NEXT:    andnl {{[0-9]+}}(%esp), %esi, %eax
+; X86-BMI1BMI2-NEXT:    popl %esi
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi64_b1_indexzext:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movq $-1, %rax
+; X64-NOBMI-NEXT:    movl %esi, %ecx
+; X64-NOBMI-NEXT:    shlq %cl, %rax
+; X64-NOBMI-NEXT:    notq %rax
+; X64-NOBMI-NEXT:    andq %rdi, %rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi64_b1_indexzext:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    # kill: def $esi killed $esi def $rsi
+; X64-BMI1BMI2-NEXT:    movq $-1, %rax
+; X64-BMI1BMI2-NEXT:    shlxq %rsi, %rax, %rax
+; X64-BMI1BMI2-NEXT:    andnq %rdi, %rax, %rax
+; X64-BMI1BMI2-NEXT:    retq
+  %conv = zext i8 %numlowbits to i64
+  %notmask = shl i64 -1, %conv
+  %mask = xor i64 %notmask, -1
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_b2_load(i64* %w, i64 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi64_b2_load:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    pushl %esi
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl $-1, %edx
+; X86-NOBMI-NEXT:    movl $-1, %eax
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    shldl %cl, %edx, %edx
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    je .LBB17_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %eax, %edx
+; X86-NOBMI-NEXT:    xorl %eax, %eax
+; X86-NOBMI-NEXT:  .LBB17_2:
+; X86-NOBMI-NEXT:    notl %edx
+; X86-NOBMI-NEXT:    notl %eax
+; X86-NOBMI-NEXT:    andl 4(%esi), %edx
+; X86-NOBMI-NEXT:    andl (%esi), %eax
+; X86-NOBMI-NEXT:    popl %esi
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi64_b2_load:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    pushl %esi
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    movl $-1, %edx
+; X86-BMI1BMI2-NEXT:    shlxl %ecx, %edx, %esi
+; X86-BMI1BMI2-NEXT:    shldl %cl, %edx, %edx
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB17_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    movl %esi, %edx
+; X86-BMI1BMI2-NEXT:    xorl %esi, %esi
+; X86-BMI1BMI2-NEXT:  .LBB17_2:
+; X86-BMI1BMI2-NEXT:    andnl 4(%eax), %edx, %edx
+; X86-BMI1BMI2-NEXT:    andnl (%eax), %esi, %eax
+; X86-BMI1BMI2-NEXT:    popl %esi
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi64_b2_load:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movq $-1, %rax
+; X64-NOBMI-NEXT:    movl %esi, %ecx
+; X64-NOBMI-NEXT:    shlq %cl, %rax
+; X64-NOBMI-NEXT:    notq %rax
+; X64-NOBMI-NEXT:    andq (%rdi), %rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi64_b2_load:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    movq $-1, %rax
+; X64-BMI1BMI2-NEXT:    shlxq %rsi, %rax, %rax
+; X64-BMI1BMI2-NEXT:    andnq (%rdi), %rax, %rax
+; X64-BMI1BMI2-NEXT:    retq
+  %val = load i64, i64* %w
+  %notmask = shl i64 -1, %numlowbits
+  %mask = xor i64 %notmask, -1
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_b3_load_indexzext(i64* %w, i8 zeroext %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi64_b3_load_indexzext:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    pushl %esi
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl $-1, %edx
+; X86-NOBMI-NEXT:    movl $-1, %eax
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    shldl %cl, %edx, %edx
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    je .LBB18_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %eax, %edx
+; X86-NOBMI-NEXT:    xorl %eax, %eax
+; X86-NOBMI-NEXT:  .LBB18_2:
+; X86-NOBMI-NEXT:    notl %edx
+; X86-NOBMI-NEXT:    notl %eax
+; X86-NOBMI-NEXT:    andl 4(%esi), %edx
+; X86-NOBMI-NEXT:    andl (%esi), %eax
+; X86-NOBMI-NEXT:    popl %esi
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi64_b3_load_indexzext:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    pushl %esi
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    movl $-1, %edx
+; X86-BMI1BMI2-NEXT:    shlxl %ecx, %edx, %esi
+; X86-BMI1BMI2-NEXT:    shldl %cl, %edx, %edx
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB18_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    movl %esi, %edx
+; X86-BMI1BMI2-NEXT:    xorl %esi, %esi
+; X86-BMI1BMI2-NEXT:  .LBB18_2:
+; X86-BMI1BMI2-NEXT:    andnl 4(%eax), %edx, %edx
+; X86-BMI1BMI2-NEXT:    andnl (%eax), %esi, %eax
+; X86-BMI1BMI2-NEXT:    popl %esi
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi64_b3_load_indexzext:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movq $-1, %rax
+; X64-NOBMI-NEXT:    movl %esi, %ecx
+; X64-NOBMI-NEXT:    shlq %cl, %rax
+; X64-NOBMI-NEXT:    notq %rax
+; X64-NOBMI-NEXT:    andq (%rdi), %rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi64_b3_load_indexzext:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    # kill: def $esi killed $esi def $rsi
+; X64-BMI1BMI2-NEXT:    movq $-1, %rax
+; X64-BMI1BMI2-NEXT:    shlxq %rsi, %rax, %rax
+; X64-BMI1BMI2-NEXT:    andnq (%rdi), %rax, %rax
+; X64-BMI1BMI2-NEXT:    retq
+  %val = load i64, i64* %w
+  %conv = zext i8 %numlowbits to i64
+  %notmask = shl i64 -1, %conv
+  %mask = xor i64 %notmask, -1
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_b4_commutative(i64 %val, i64 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi64_b4_commutative:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl $-1, %edx
+; X86-NOBMI-NEXT:    movl $-1, %eax
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    shldl %cl, %edx, %edx
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    je .LBB19_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %eax, %edx
+; X86-NOBMI-NEXT:    xorl %eax, %eax
+; X86-NOBMI-NEXT:  .LBB19_2:
+; X86-NOBMI-NEXT:    notl %edx
+; X86-NOBMI-NEXT:    notl %eax
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi64_b4_commutative:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    pushl %esi
+; X86-BMI1BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    movl $-1, %eax
+; X86-BMI1BMI2-NEXT:    shlxl %ecx, %eax, %esi
+; X86-BMI1BMI2-NEXT:    shldl %cl, %eax, %eax
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB19_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    movl %esi, %eax
+; X86-BMI1BMI2-NEXT:    xorl %esi, %esi
+; X86-BMI1BMI2-NEXT:  .LBB19_2:
+; X86-BMI1BMI2-NEXT:    andnl {{[0-9]+}}(%esp), %eax, %edx
+; X86-BMI1BMI2-NEXT:    andnl {{[0-9]+}}(%esp), %esi, %eax
+; X86-BMI1BMI2-NEXT:    popl %esi
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi64_b4_commutative:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movq $-1, %rax
+; X64-NOBMI-NEXT:    movl %esi, %ecx
+; X64-NOBMI-NEXT:    shlq %cl, %rax
+; X64-NOBMI-NEXT:    notq %rax
+; X64-NOBMI-NEXT:    andq %rdi, %rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi64_b4_commutative:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    movq $-1, %rax
+; X64-BMI1BMI2-NEXT:    shlxq %rsi, %rax, %rax
+; X64-BMI1BMI2-NEXT:    andnq %rdi, %rax, %rax
+; X64-BMI1BMI2-NEXT:    retq
+  %notmask = shl i64 -1, %numlowbits
+  %mask = xor i64 %notmask, -1
+  %masked = and i64 %val, %mask ; swapped order
+  ret i64 %masked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Pattern c. 32-bit
+; ---------------------------------------------------------------------------- ;
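+
+; Pattern c is roughly the following C (a sketch; the function name is
+; illustrative, <stdint.h> types, and 0 < %numlowbits is assumed so the
+; shift amount stays below 32):
+;
+;   uint32_t bzhi32_c(uint32_t val, uint32_t numlowbits) {
+;     return val & (~0U >> (32 - numlowbits)); // all-ones shifted down
+;   }
+;
+; This form matches BZHI's semantics directly, so the BMI checks below fold
+; the whole subtract/shift/and sequence into one bzhil taking %numlowbits.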
+
+define i32 @bzhi32_c0(i32 %val, i32 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi32_c0:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movl $32, %ecx
+; X86-NOBMI-NEXT:    subl {{[0-9]+}}(%esp), %ecx
+; X86-NOBMI-NEXT:    movl $-1, %eax
+; X86-NOBMI-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X86-NOBMI-NEXT:    shrl %cl, %eax
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi32_c0:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    bzhil %eax, {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi32_c0:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movl $32, %ecx
+; X64-NOBMI-NEXT:    subl %esi, %ecx
+; X64-NOBMI-NEXT:    movl $-1, %eax
+; X64-NOBMI-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NOBMI-NEXT:    shrl %cl, %eax
+; X64-NOBMI-NEXT:    andl %edi, %eax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi32_c0:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    bzhil %esi, %edi, %eax
+; X64-BMI1BMI2-NEXT:    retq
+  %numhighbits = sub i32 32, %numlowbits
+  %mask = lshr i32 -1, %numhighbits
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_c1_indexzext(i32 %val, i8 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi32_c1_indexzext:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movb $32, %cl
+; X86-NOBMI-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl $-1, %eax
+; X86-NOBMI-NEXT:    shrl %cl, %eax
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi32_c1_indexzext:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    movb $32, %al
+; X86-BMI1BMI2-NEXT:    subb {{[0-9]+}}(%esp), %al
+; X86-BMI1BMI2-NEXT:    movl $-1, %ecx
+; X86-BMI1BMI2-NEXT:    shrxl %eax, %ecx, %eax
+; X86-BMI1BMI2-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi32_c1_indexzext:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movb $32, %cl
+; X64-NOBMI-NEXT:    subb %sil, %cl
+; X64-NOBMI-NEXT:    movl $-1, %eax
+; X64-NOBMI-NEXT:    shrl %cl, %eax
+; X64-NOBMI-NEXT:    andl %edi, %eax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi32_c1_indexzext:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    movb $32, %al
+; X64-BMI1BMI2-NEXT:    subb %sil, %al
+; X64-BMI1BMI2-NEXT:    movl $-1, %ecx
+; X64-BMI1BMI2-NEXT:    shrxl %eax, %ecx, %eax
+; X64-BMI1BMI2-NEXT:    andl %edi, %eax
+; X64-BMI1BMI2-NEXT:    retq
+  %numhighbits = sub i8 32, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i32
+  %mask = lshr i32 -1, %sh_prom
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_c2_load(i32* %w, i32 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi32_c2_load:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT:    movl $32, %ecx
+; X86-NOBMI-NEXT:    subl {{[0-9]+}}(%esp), %ecx
+; X86-NOBMI-NEXT:    movl $-1, %eax
+; X86-NOBMI-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X86-NOBMI-NEXT:    shrl %cl, %eax
+; X86-NOBMI-NEXT:    andl (%edx), %eax
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi32_c2_load:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-BMI1BMI2-NEXT:    bzhil %eax, (%ecx), %eax
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi32_c2_load:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movl $32, %ecx
+; X64-NOBMI-NEXT:    subl %esi, %ecx
+; X64-NOBMI-NEXT:    movl $-1, %eax
+; X64-NOBMI-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NOBMI-NEXT:    shrl %cl, %eax
+; X64-NOBMI-NEXT:    andl (%rdi), %eax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi32_c2_load:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    bzhil %esi, (%rdi), %eax
+; X64-BMI1BMI2-NEXT:    retq
+  %val = load i32, i32* %w
+  %numhighbits = sub i32 32, %numlowbits
+  %mask = lshr i32 -1, %numhighbits
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_c3_load_indexzext(i32* %w, i8 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi32_c3_load_indexzext:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT:    movb $32, %cl
+; X86-NOBMI-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl $-1, %eax
+; X86-NOBMI-NEXT:    shrl %cl, %eax
+; X86-NOBMI-NEXT:    andl (%edx), %eax
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi32_c3_load_indexzext:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-BMI1BMI2-NEXT:    movb $32, %al
+; X86-BMI1BMI2-NEXT:    subb {{[0-9]+}}(%esp), %al
+; X86-BMI1BMI2-NEXT:    movl $-1, %edx
+; X86-BMI1BMI2-NEXT:    shrxl %eax, %edx, %eax
+; X86-BMI1BMI2-NEXT:    andl (%ecx), %eax
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi32_c3_load_indexzext:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movb $32, %cl
+; X64-NOBMI-NEXT:    subb %sil, %cl
+; X64-NOBMI-NEXT:    movl $-1, %eax
+; X64-NOBMI-NEXT:    shrl %cl, %eax
+; X64-NOBMI-NEXT:    andl (%rdi), %eax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi32_c3_load_indexzext:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    movb $32, %al
+; X64-BMI1BMI2-NEXT:    subb %sil, %al
+; X64-BMI1BMI2-NEXT:    movl $-1, %ecx
+; X64-BMI1BMI2-NEXT:    shrxl %eax, %ecx, %eax
+; X64-BMI1BMI2-NEXT:    andl (%rdi), %eax
+; X64-BMI1BMI2-NEXT:    retq
+  %val = load i32, i32* %w
+  %numhighbits = sub i8 32, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i32
+  %mask = lshr i32 -1, %sh_prom
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_c4_commutative(i32 %val, i32 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi32_c4_commutative:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movl $32, %ecx
+; X86-NOBMI-NEXT:    subl {{[0-9]+}}(%esp), %ecx
+; X86-NOBMI-NEXT:    movl $-1, %eax
+; X86-NOBMI-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X86-NOBMI-NEXT:    shrl %cl, %eax
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi32_c4_commutative:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    bzhil %eax, {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi32_c4_commutative:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movl $32, %ecx
+; X64-NOBMI-NEXT:    subl %esi, %ecx
+; X64-NOBMI-NEXT:    movl $-1, %eax
+; X64-NOBMI-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NOBMI-NEXT:    shrl %cl, %eax
+; X64-NOBMI-NEXT:    andl %edi, %eax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi32_c4_commutative:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    bzhil %esi, %edi, %eax
+; X64-BMI1BMI2-NEXT:    retq
+  %numhighbits = sub i32 32, %numlowbits
+  %mask = lshr i32 -1, %numhighbits
+  %masked = and i32 %val, %mask ; swapped order
+  ret i32 %masked
+}
+
+; 64-bit
+
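+; The 64-bit variant of pattern c, as a rough C sketch (0 < %numlowbits
+; assumed, name illustrative):
+;
+;   uint64_t bzhi64_c(uint64_t val, uint64_t numlowbits) {
+;     return val & (~0ULL >> (64 - numlowbits));
+;   }
+;
+; The variable 64-bit lshr of all-ones again expands on i686 into shrl+shrdl
+; with a test of bit 5 of the count; BMI2 swaps the plain shrl for shrxl,
+; but shrdl still wants the count in %cl.
+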
+define i64 @bzhi64_c0(i64 %val, i64 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi64_c0:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movl $64, %ecx
+; X86-NOBMI-NEXT:    subl {{[0-9]+}}(%esp), %ecx
+; X86-NOBMI-NEXT:    movl $-1, %eax
+; X86-NOBMI-NEXT:    movl $-1, %edx
+; X86-NOBMI-NEXT:    shrl %cl, %edx
+; X86-NOBMI-NEXT:    shrdl %cl, %eax, %eax
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    je .LBB25_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %edx, %eax
+; X86-NOBMI-NEXT:    xorl %edx, %edx
+; X86-NOBMI-NEXT:  .LBB25_2:
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi64_c0:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    movl $64, %ecx
+; X86-BMI1BMI2-NEXT:    subl {{[0-9]+}}(%esp), %ecx
+; X86-BMI1BMI2-NEXT:    movl $-1, %eax
+; X86-BMI1BMI2-NEXT:    shrxl %ecx, %eax, %edx
+; X86-BMI1BMI2-NEXT:    shrdl %cl, %eax, %eax
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB25_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    movl %edx, %eax
+; X86-BMI1BMI2-NEXT:    xorl %edx, %edx
+; X86-BMI1BMI2-NEXT:  .LBB25_2:
+; X86-BMI1BMI2-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi64_c0:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movl $64, %ecx
+; X64-NOBMI-NEXT:    subl %esi, %ecx
+; X64-NOBMI-NEXT:    movq $-1, %rax
+; X64-NOBMI-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NOBMI-NEXT:    shrq %cl, %rax
+; X64-NOBMI-NEXT:    andq %rdi, %rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi64_c0:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    bzhiq %rsi, %rdi, %rax
+; X64-BMI1BMI2-NEXT:    retq
+  %numhighbits = sub i64 64, %numlowbits
+  %mask = lshr i64 -1, %numhighbits
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_c1_indexzext(i64 %val, i8 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi64_c1_indexzext:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movb $64, %cl
+; X86-NOBMI-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl $-1, %eax
+; X86-NOBMI-NEXT:    movl $-1, %edx
+; X86-NOBMI-NEXT:    shrl %cl, %edx
+; X86-NOBMI-NEXT:    shrdl %cl, %eax, %eax
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    je .LBB26_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %edx, %eax
+; X86-NOBMI-NEXT:    xorl %edx, %edx
+; X86-NOBMI-NEXT:  .LBB26_2:
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi64_c1_indexzext:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    movb $64, %cl
+; X86-BMI1BMI2-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    movl $-1, %eax
+; X86-BMI1BMI2-NEXT:    shrxl %ecx, %eax, %edx
+; X86-BMI1BMI2-NEXT:    shrdl %cl, %eax, %eax
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB26_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    movl %edx, %eax
+; X86-BMI1BMI2-NEXT:    xorl %edx, %edx
+; X86-BMI1BMI2-NEXT:  .LBB26_2:
+; X86-BMI1BMI2-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi64_c1_indexzext:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movb $64, %cl
+; X64-NOBMI-NEXT:    subb %sil, %cl
+; X64-NOBMI-NEXT:    movq $-1, %rax
+; X64-NOBMI-NEXT:    shrq %cl, %rax
+; X64-NOBMI-NEXT:    andq %rdi, %rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi64_c1_indexzext:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    movb $64, %al
+; X64-BMI1BMI2-NEXT:    subb %sil, %al
+; X64-BMI1BMI2-NEXT:    movq $-1, %rcx
+; X64-BMI1BMI2-NEXT:    shrxq %rax, %rcx, %rax
+; X64-BMI1BMI2-NEXT:    andq %rdi, %rax
+; X64-BMI1BMI2-NEXT:    retq
+  %numhighbits = sub i8 64, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i64
+  %mask = lshr i64 -1, %sh_prom
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_c2_load(i64* %w, i64 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi64_c2_load:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    pushl %esi
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NOBMI-NEXT:    movl $64, %ecx
+; X86-NOBMI-NEXT:    subl {{[0-9]+}}(%esp), %ecx
+; X86-NOBMI-NEXT:    movl $-1, %eax
+; X86-NOBMI-NEXT:    movl $-1, %edx
+; X86-NOBMI-NEXT:    shrl %cl, %edx
+; X86-NOBMI-NEXT:    shrdl %cl, %eax, %eax
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    je .LBB27_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %edx, %eax
+; X86-NOBMI-NEXT:    xorl %edx, %edx
+; X86-NOBMI-NEXT:  .LBB27_2:
+; X86-NOBMI-NEXT:    andl (%esi), %eax
+; X86-NOBMI-NEXT:    andl 4(%esi), %edx
+; X86-NOBMI-NEXT:    popl %esi
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi64_c2_load:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    pushl %esi
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1BMI2-NEXT:    movl $64, %ecx
+; X86-BMI1BMI2-NEXT:    subl {{[0-9]+}}(%esp), %ecx
+; X86-BMI1BMI2-NEXT:    movl $-1, %eax
+; X86-BMI1BMI2-NEXT:    shrxl %ecx, %eax, %edx
+; X86-BMI1BMI2-NEXT:    shrdl %cl, %eax, %eax
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB27_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    movl %edx, %eax
+; X86-BMI1BMI2-NEXT:    xorl %edx, %edx
+; X86-BMI1BMI2-NEXT:  .LBB27_2:
+; X86-BMI1BMI2-NEXT:    andl (%esi), %eax
+; X86-BMI1BMI2-NEXT:    andl 4(%esi), %edx
+; X86-BMI1BMI2-NEXT:    popl %esi
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi64_c2_load:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movl $64, %ecx
+; X64-NOBMI-NEXT:    subl %esi, %ecx
+; X64-NOBMI-NEXT:    movq $-1, %rax
+; X64-NOBMI-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NOBMI-NEXT:    shrq %cl, %rax
+; X64-NOBMI-NEXT:    andq (%rdi), %rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi64_c2_load:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    bzhiq %rsi, (%rdi), %rax
+; X64-BMI1BMI2-NEXT:    retq
+  %val = load i64, i64* %w
+  %numhighbits = sub i64 64, %numlowbits
+  %mask = lshr i64 -1, %numhighbits
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_c3_load_indexzext(i64* %w, i8 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi64_c3_load_indexzext:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    pushl %esi
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NOBMI-NEXT:    movb $64, %cl
+; X86-NOBMI-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl $-1, %eax
+; X86-NOBMI-NEXT:    movl $-1, %edx
+; X86-NOBMI-NEXT:    shrl %cl, %edx
+; X86-NOBMI-NEXT:    shrdl %cl, %eax, %eax
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    je .LBB28_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %edx, %eax
+; X86-NOBMI-NEXT:    xorl %edx, %edx
+; X86-NOBMI-NEXT:  .LBB28_2:
+; X86-NOBMI-NEXT:    andl (%esi), %eax
+; X86-NOBMI-NEXT:    andl 4(%esi), %edx
+; X86-NOBMI-NEXT:    popl %esi
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi64_c3_load_indexzext:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    pushl %esi
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1BMI2-NEXT:    movb $64, %cl
+; X86-BMI1BMI2-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    movl $-1, %eax
+; X86-BMI1BMI2-NEXT:    shrxl %ecx, %eax, %edx
+; X86-BMI1BMI2-NEXT:    shrdl %cl, %eax, %eax
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB28_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    movl %edx, %eax
+; X86-BMI1BMI2-NEXT:    xorl %edx, %edx
+; X86-BMI1BMI2-NEXT:  .LBB28_2:
+; X86-BMI1BMI2-NEXT:    andl (%esi), %eax
+; X86-BMI1BMI2-NEXT:    andl 4(%esi), %edx
+; X86-BMI1BMI2-NEXT:    popl %esi
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi64_c3_load_indexzext:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movb $64, %cl
+; X64-NOBMI-NEXT:    subb %sil, %cl
+; X64-NOBMI-NEXT:    movq $-1, %rax
+; X64-NOBMI-NEXT:    shrq %cl, %rax
+; X64-NOBMI-NEXT:    andq (%rdi), %rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi64_c3_load_indexzext:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    movb $64, %al
+; X64-BMI1BMI2-NEXT:    subb %sil, %al
+; X64-BMI1BMI2-NEXT:    movq $-1, %rcx
+; X64-BMI1BMI2-NEXT:    shrxq %rax, %rcx, %rax
+; X64-BMI1BMI2-NEXT:    andq (%rdi), %rax
+; X64-BMI1BMI2-NEXT:    retq
+  %val = load i64, i64* %w
+  %numhighbits = sub i8 64, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i64
+  %mask = lshr i64 -1, %sh_prom
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_c4_commutative(i64 %val, i64 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi64_c4_commutative:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movl $64, %ecx
+; X86-NOBMI-NEXT:    subl {{[0-9]+}}(%esp), %ecx
+; X86-NOBMI-NEXT:    movl $-1, %eax
+; X86-NOBMI-NEXT:    movl $-1, %edx
+; X86-NOBMI-NEXT:    shrl %cl, %edx
+; X86-NOBMI-NEXT:    shrdl %cl, %eax, %eax
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    je .LBB29_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %edx, %eax
+; X86-NOBMI-NEXT:    xorl %edx, %edx
+; X86-NOBMI-NEXT:  .LBB29_2:
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi64_c4_commutative:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    movl $64, %ecx
+; X86-BMI1BMI2-NEXT:    subl {{[0-9]+}}(%esp), %ecx
+; X86-BMI1BMI2-NEXT:    movl $-1, %eax
+; X86-BMI1BMI2-NEXT:    shrxl %ecx, %eax, %edx
+; X86-BMI1BMI2-NEXT:    shrdl %cl, %eax, %eax
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB29_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    movl %edx, %eax
+; X86-BMI1BMI2-NEXT:    xorl %edx, %edx
+; X86-BMI1BMI2-NEXT:  .LBB29_2:
+; X86-BMI1BMI2-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi64_c4_commutative:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movl $64, %ecx
+; X64-NOBMI-NEXT:    subl %esi, %ecx
+; X64-NOBMI-NEXT:    movq $-1, %rax
+; X64-NOBMI-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NOBMI-NEXT:    shrq %cl, %rax
+; X64-NOBMI-NEXT:    andq %rdi, %rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi64_c4_commutative:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    bzhiq %rsi, %rdi, %rax
+; X64-BMI1BMI2-NEXT:    retq
+  %numhighbits = sub i64 64, %numlowbits
+  %mask = lshr i64 -1, %numhighbits
+  %masked = and i64 %val, %mask ; swapped order
+  ret i64 %masked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Pattern d. 32-bit
+; ---------------------------------------------------------------------------- ;
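+
+; Pattern d is roughly the following C (a sketch; the function name is
+; illustrative, <stdint.h> types, and 0 < %numlowbits is assumed):
+;
+;   uint32_t bzhi32_d(uint32_t val, uint32_t numlowbits) {
+;     return (val << (32 - numlowbits)) >> (32 - numlowbits);
+;   }
+;
+; There is no explicit mask here at all, yet the BMI checks below still
+; recognize the shift pair as a single bzhil.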
+
+define i32 @bzhi32_d0(i32 %val, i32 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi32_d0:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    movl $32, %ecx
+; X86-NOBMI-NEXT:    subl {{[0-9]+}}(%esp), %ecx
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X86-NOBMI-NEXT:    shrl %cl, %eax
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi32_d0:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    bzhil %eax, {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi32_d0:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movl $32, %ecx
+; X64-NOBMI-NEXT:    subl %esi, %ecx
+; X64-NOBMI-NEXT:    shll %cl, %edi
+; X64-NOBMI-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NOBMI-NEXT:    shrl %cl, %edi
+; X64-NOBMI-NEXT:    movl %edi, %eax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi32_d0:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    bzhil %esi, %edi, %eax
+; X64-BMI1BMI2-NEXT:    retq
+  %numhighbits = sub i32 32, %numlowbits
+  %highbitscleared = shl i32 %val, %numhighbits
+  %masked = lshr i32 %highbitscleared, %numhighbits
+  ret i32 %masked
+}
+
+define i32 @bzhi32_d1_indexzext(i32 %val, i8 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi32_d1_indexzext:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    movb $32, %cl
+; X86-NOBMI-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    shrl %cl, %eax
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi32_d1_indexzext:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    movb $32, %al
+; X86-BMI1BMI2-NEXT:    subb {{[0-9]+}}(%esp), %al
+; X86-BMI1BMI2-NEXT:    shlxl %eax, {{[0-9]+}}(%esp), %ecx
+; X86-BMI1BMI2-NEXT:    shrxl %eax, %ecx, %eax
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi32_d1_indexzext:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movb $32, %cl
+; X64-NOBMI-NEXT:    subb %sil, %cl
+; X64-NOBMI-NEXT:    shll %cl, %edi
+; X64-NOBMI-NEXT:    shrl %cl, %edi
+; X64-NOBMI-NEXT:    movl %edi, %eax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi32_d1_indexzext:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    movb $32, %al
+; X64-BMI1BMI2-NEXT:    subb %sil, %al
+; X64-BMI1BMI2-NEXT:    shlxl %eax, %edi, %ecx
+; X64-BMI1BMI2-NEXT:    shrxl %eax, %ecx, %eax
+; X64-BMI1BMI2-NEXT:    retq
+  %numhighbits = sub i8 32, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i32
+  %highbitscleared = shl i32 %val, %sh_prom
+  %masked = lshr i32 %highbitscleared, %sh_prom
+  ret i32 %masked
+}
+
+define i32 @bzhi32_d2_load(i32* %w, i32 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi32_d2_load:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    movl (%eax), %eax
+; X86-NOBMI-NEXT:    movl $32, %ecx
+; X86-NOBMI-NEXT:    subl {{[0-9]+}}(%esp), %ecx
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X86-NOBMI-NEXT:    shrl %cl, %eax
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi32_d2_load:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-BMI1BMI2-NEXT:    bzhil %eax, (%ecx), %eax
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi32_d2_load:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movl (%rdi), %eax
+; X64-NOBMI-NEXT:    movl $32, %ecx
+; X64-NOBMI-NEXT:    subl %esi, %ecx
+; X64-NOBMI-NEXT:    shll %cl, %eax
+; X64-NOBMI-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NOBMI-NEXT:    shrl %cl, %eax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi32_d2_load:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    bzhil %esi, (%rdi), %eax
+; X64-BMI1BMI2-NEXT:    retq
+  %val = load i32, i32* %w
+  %numhighbits = sub i32 32, %numlowbits
+  %highbitscleared = shl i32 %val, %numhighbits
+  %masked = lshr i32 %highbitscleared, %numhighbits
+  ret i32 %masked
+}
+
+define i32 @bzhi32_d3_load_indexzext(i32* %w, i8 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi32_d3_load_indexzext:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    movl (%eax), %eax
+; X86-NOBMI-NEXT:    movb $32, %cl
+; X86-NOBMI-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    shrl %cl, %eax
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi32_d3_load_indexzext:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    movb $32, %cl
+; X86-BMI1BMI2-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    shlxl %ecx, (%eax), %eax
+; X86-BMI1BMI2-NEXT:    shrxl %ecx, %eax, %eax
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi32_d3_load_indexzext:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movl (%rdi), %eax
+; X64-NOBMI-NEXT:    movb $32, %cl
+; X64-NOBMI-NEXT:    subb %sil, %cl
+; X64-NOBMI-NEXT:    shll %cl, %eax
+; X64-NOBMI-NEXT:    shrl %cl, %eax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi32_d3_load_indexzext:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    movb $32, %al
+; X64-BMI1BMI2-NEXT:    subb %sil, %al
+; X64-BMI1BMI2-NEXT:    shlxl %eax, (%rdi), %ecx
+; X64-BMI1BMI2-NEXT:    shrxl %eax, %ecx, %eax
+; X64-BMI1BMI2-NEXT:    retq
+  %val = load i32, i32* %w
+  %numhighbits = sub i8 32, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i32
+  %highbitscleared = shl i32 %val, %sh_prom
+  %masked = lshr i32 %highbitscleared, %sh_prom
+  ret i32 %masked
+}
+
+; 64-bit
+
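+; The 64-bit variant of pattern d, as a rough C sketch (0 < %numlowbits
+; assumed, name illustrative):
+;
+;   uint64_t bzhi64_d(uint64_t val, uint64_t numlowbits) {
+;     return (val << (64 - numlowbits)) >> (64 - numlowbits);
+;   }
+;
+; On i686 this is the most expensive variant: both 64-bit shifts must be
+; expanded, giving the branchy sequences below with three separate
+; testb $32, %cl checks, while x86-64 with BMI2 still gets a single bzhiq.
+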
+define i64 @bzhi64_d0(i64 %val, i64 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi64_d0:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    pushl %ebx
+; X86-NOBMI-NEXT:    pushl %edi
+; X86-NOBMI-NEXT:    pushl %esi
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    movl $64, %ecx
+; X86-NOBMI-NEXT:    subl {{[0-9]+}}(%esp), %ecx
+; X86-NOBMI-NEXT:    movl %edx, %esi
+; X86-NOBMI-NEXT:    shll %cl, %esi
+; X86-NOBMI-NEXT:    shldl %cl, %edx, %eax
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    movl %esi, %edi
+; X86-NOBMI-NEXT:    jne .LBB34_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %eax, %edi
+; X86-NOBMI-NEXT:  .LBB34_2:
+; X86-NOBMI-NEXT:    movl %edi, %eax
+; X86-NOBMI-NEXT:    shrl %cl, %eax
+; X86-NOBMI-NEXT:    xorl %ebx, %ebx
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    movl $0, %edx
+; X86-NOBMI-NEXT:    jne .LBB34_4
+; X86-NOBMI-NEXT:  # %bb.3:
+; X86-NOBMI-NEXT:    movl %esi, %ebx
+; X86-NOBMI-NEXT:    movl %eax, %edx
+; X86-NOBMI-NEXT:  .LBB34_4:
+; X86-NOBMI-NEXT:    shrdl %cl, %edi, %ebx
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    jne .LBB34_6
+; X86-NOBMI-NEXT:  # %bb.5:
+; X86-NOBMI-NEXT:    movl %ebx, %eax
+; X86-NOBMI-NEXT:  .LBB34_6:
+; X86-NOBMI-NEXT:    popl %esi
+; X86-NOBMI-NEXT:    popl %edi
+; X86-NOBMI-NEXT:    popl %ebx
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi64_d0:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    pushl %edi
+; X86-BMI1BMI2-NEXT:    pushl %esi
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1BMI2-NEXT:    movl $64, %ecx
+; X86-BMI1BMI2-NEXT:    subl {{[0-9]+}}(%esp), %ecx
+; X86-BMI1BMI2-NEXT:    shldl %cl, %eax, %esi
+; X86-BMI1BMI2-NEXT:    shlxl %ecx, %eax, %edi
+; X86-BMI1BMI2-NEXT:    xorl %edx, %edx
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB34_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    movl %edi, %esi
+; X86-BMI1BMI2-NEXT:    movl $0, %edi
+; X86-BMI1BMI2-NEXT:  .LBB34_2:
+; X86-BMI1BMI2-NEXT:    shrxl %ecx, %esi, %eax
+; X86-BMI1BMI2-NEXT:    jne .LBB34_4
+; X86-BMI1BMI2-NEXT:  # %bb.3:
+; X86-BMI1BMI2-NEXT:    movl %eax, %edx
+; X86-BMI1BMI2-NEXT:  .LBB34_4:
+; X86-BMI1BMI2-NEXT:    shrdl %cl, %esi, %edi
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    jne .LBB34_6
+; X86-BMI1BMI2-NEXT:  # %bb.5:
+; X86-BMI1BMI2-NEXT:    movl %edi, %eax
+; X86-BMI1BMI2-NEXT:  .LBB34_6:
+; X86-BMI1BMI2-NEXT:    popl %esi
+; X86-BMI1BMI2-NEXT:    popl %edi
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi64_d0:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movl $64, %ecx
+; X64-NOBMI-NEXT:    subl %esi, %ecx
+; X64-NOBMI-NEXT:    shlq %cl, %rdi
+; X64-NOBMI-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NOBMI-NEXT:    shrq %cl, %rdi
+; X64-NOBMI-NEXT:    movq %rdi, %rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi64_d0:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    bzhiq %rsi, %rdi, %rax
+; X64-BMI1BMI2-NEXT:    retq
+  %numhighbits = sub i64 64, %numlowbits
+  %highbitscleared = shl i64 %val, %numhighbits
+  %masked = lshr i64 %highbitscleared, %numhighbits
+  ret i64 %masked
+}
+
+define i64 @bzhi64_d1_indexzext(i64 %val, i8 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi64_d1_indexzext:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    pushl %ebx
+; X86-NOBMI-NEXT:    pushl %edi
+; X86-NOBMI-NEXT:    pushl %esi
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    movb $64, %cl
+; X86-NOBMI-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl %edx, %esi
+; X86-NOBMI-NEXT:    shll %cl, %esi
+; X86-NOBMI-NEXT:    shldl %cl, %edx, %eax
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    movl %esi, %edi
+; X86-NOBMI-NEXT:    jne .LBB35_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %eax, %edi
+; X86-NOBMI-NEXT:  .LBB35_2:
+; X86-NOBMI-NEXT:    movl %edi, %eax
+; X86-NOBMI-NEXT:    shrl %cl, %eax
+; X86-NOBMI-NEXT:    xorl %ebx, %ebx
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    movl $0, %edx
+; X86-NOBMI-NEXT:    jne .LBB35_4
+; X86-NOBMI-NEXT:  # %bb.3:
+; X86-NOBMI-NEXT:    movl %esi, %ebx
+; X86-NOBMI-NEXT:    movl %eax, %edx
+; X86-NOBMI-NEXT:  .LBB35_4:
+; X86-NOBMI-NEXT:    shrdl %cl, %edi, %ebx
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    jne .LBB35_6
+; X86-NOBMI-NEXT:  # %bb.5:
+; X86-NOBMI-NEXT:    movl %ebx, %eax
+; X86-NOBMI-NEXT:  .LBB35_6:
+; X86-NOBMI-NEXT:    popl %esi
+; X86-NOBMI-NEXT:    popl %edi
+; X86-NOBMI-NEXT:    popl %ebx
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi64_d1_indexzext:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    pushl %edi
+; X86-BMI1BMI2-NEXT:    pushl %esi
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1BMI2-NEXT:    movb $64, %cl
+; X86-BMI1BMI2-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    shldl %cl, %eax, %esi
+; X86-BMI1BMI2-NEXT:    shlxl %ecx, %eax, %edi
+; X86-BMI1BMI2-NEXT:    xorl %edx, %edx
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB35_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    movl %edi, %esi
+; X86-BMI1BMI2-NEXT:    movl $0, %edi
+; X86-BMI1BMI2-NEXT:  .LBB35_2:
+; X86-BMI1BMI2-NEXT:    shrxl %ecx, %esi, %eax
+; X86-BMI1BMI2-NEXT:    jne .LBB35_4
+; X86-BMI1BMI2-NEXT:  # %bb.3:
+; X86-BMI1BMI2-NEXT:    movl %eax, %edx
+; X86-BMI1BMI2-NEXT:  .LBB35_4:
+; X86-BMI1BMI2-NEXT:    shrdl %cl, %esi, %edi
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    jne .LBB35_6
+; X86-BMI1BMI2-NEXT:  # %bb.5:
+; X86-BMI1BMI2-NEXT:    movl %edi, %eax
+; X86-BMI1BMI2-NEXT:  .LBB35_6:
+; X86-BMI1BMI2-NEXT:    popl %esi
+; X86-BMI1BMI2-NEXT:    popl %edi
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi64_d1_indexzext:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movb $64, %cl
+; X64-NOBMI-NEXT:    subb %sil, %cl
+; X64-NOBMI-NEXT:    shlq %cl, %rdi
+; X64-NOBMI-NEXT:    shrq %cl, %rdi
+; X64-NOBMI-NEXT:    movq %rdi, %rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi64_d1_indexzext:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    movb $64, %al
+; X64-BMI1BMI2-NEXT:    subb %sil, %al
+; X64-BMI1BMI2-NEXT:    shlxq %rax, %rdi, %rcx
+; X64-BMI1BMI2-NEXT:    shrxq %rax, %rcx, %rax
+; X64-BMI1BMI2-NEXT:    retq
+  %numhighbits = sub i8 64, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i64
+  %highbitscleared = shl i64 %val, %sh_prom
+  %masked = lshr i64 %highbitscleared, %sh_prom
+  ret i64 %masked
+}
+
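(Annotation, not part of the commit: in the _indexzext variants the bit count arrives as an i8, so the 64 - numlowbits subtraction happens at 8 bits and is only then zero-extended to the shift type, matching the sub i8 / zext pair in the IR above. A hedged C sketch with a hypothetical name:)

    #include <stdint.h>

    uint64_t extract_lowbits_d_u8(uint64_t val, uint8_t numlowbits) {
        /* The count math is done in 8 bits first, then widened. */
        uint8_t numhighbits = (uint8_t)(64 - numlowbits);
        return (val << numhighbits) >> numhighbits;
    }
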
+define i64 @bzhi64_d2_load(i64* %w, i64 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi64_d2_load:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    pushl %ebx
+; X86-NOBMI-NEXT:    pushl %edi
+; X86-NOBMI-NEXT:    pushl %esi
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    movl (%eax), %edx
+; X86-NOBMI-NEXT:    movl 4(%eax), %eax
+; X86-NOBMI-NEXT:    movl $64, %ecx
+; X86-NOBMI-NEXT:    subl {{[0-9]+}}(%esp), %ecx
+; X86-NOBMI-NEXT:    movl %edx, %esi
+; X86-NOBMI-NEXT:    shll %cl, %esi
+; X86-NOBMI-NEXT:    shldl %cl, %edx, %eax
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    movl %esi, %edi
+; X86-NOBMI-NEXT:    jne .LBB36_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %eax, %edi
+; X86-NOBMI-NEXT:  .LBB36_2:
+; X86-NOBMI-NEXT:    movl %edi, %eax
+; X86-NOBMI-NEXT:    shrl %cl, %eax
+; X86-NOBMI-NEXT:    xorl %ebx, %ebx
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    movl $0, %edx
+; X86-NOBMI-NEXT:    jne .LBB36_4
+; X86-NOBMI-NEXT:  # %bb.3:
+; X86-NOBMI-NEXT:    movl %esi, %ebx
+; X86-NOBMI-NEXT:    movl %eax, %edx
+; X86-NOBMI-NEXT:  .LBB36_4:
+; X86-NOBMI-NEXT:    shrdl %cl, %edi, %ebx
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    jne .LBB36_6
+; X86-NOBMI-NEXT:  # %bb.5:
+; X86-NOBMI-NEXT:    movl %ebx, %eax
+; X86-NOBMI-NEXT:  .LBB36_6:
+; X86-NOBMI-NEXT:    popl %esi
+; X86-NOBMI-NEXT:    popl %edi
+; X86-NOBMI-NEXT:    popl %ebx
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi64_d2_load:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    pushl %edi
+; X86-BMI1BMI2-NEXT:    pushl %esi
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    movl (%eax), %edx
+; X86-BMI1BMI2-NEXT:    movl 4(%eax), %esi
+; X86-BMI1BMI2-NEXT:    movl $64, %ecx
+; X86-BMI1BMI2-NEXT:    subl {{[0-9]+}}(%esp), %ecx
+; X86-BMI1BMI2-NEXT:    shldl %cl, %edx, %esi
+; X86-BMI1BMI2-NEXT:    shlxl %ecx, %edx, %edi
+; X86-BMI1BMI2-NEXT:    xorl %edx, %edx
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB36_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    movl %edi, %esi
+; X86-BMI1BMI2-NEXT:    movl $0, %edi
+; X86-BMI1BMI2-NEXT:  .LBB36_2:
+; X86-BMI1BMI2-NEXT:    shrxl %ecx, %esi, %eax
+; X86-BMI1BMI2-NEXT:    jne .LBB36_4
+; X86-BMI1BMI2-NEXT:  # %bb.3:
+; X86-BMI1BMI2-NEXT:    movl %eax, %edx
+; X86-BMI1BMI2-NEXT:  .LBB36_4:
+; X86-BMI1BMI2-NEXT:    shrdl %cl, %esi, %edi
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    jne .LBB36_6
+; X86-BMI1BMI2-NEXT:  # %bb.5:
+; X86-BMI1BMI2-NEXT:    movl %edi, %eax
+; X86-BMI1BMI2-NEXT:  .LBB36_6:
+; X86-BMI1BMI2-NEXT:    popl %esi
+; X86-BMI1BMI2-NEXT:    popl %edi
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi64_d2_load:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movq (%rdi), %rax
+; X64-NOBMI-NEXT:    movl $64, %ecx
+; X64-NOBMI-NEXT:    subl %esi, %ecx
+; X64-NOBMI-NEXT:    shlq %cl, %rax
+; X64-NOBMI-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NOBMI-NEXT:    shrq %cl, %rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi64_d2_load:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    bzhiq %rsi, (%rdi), %rax
+; X64-BMI1BMI2-NEXT:    retq
+  %val = load i64, i64* %w
+  %numhighbits = sub i64 64, %numlowbits
+  %highbitscleared = shl i64 %val, %numhighbits
+  %masked = lshr i64 %highbitscleared, %numhighbits
+  ret i64 %masked
+}
+
+define i64 @bzhi64_d3_load_indexzext(i64* %w, i8 %numlowbits) nounwind {
+; X86-NOBMI-LABEL: bzhi64_d3_load_indexzext:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    pushl %ebx
+; X86-NOBMI-NEXT:    pushl %edi
+; X86-NOBMI-NEXT:    pushl %esi
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    movl (%eax), %edx
+; X86-NOBMI-NEXT:    movl 4(%eax), %eax
+; X86-NOBMI-NEXT:    movb $64, %cl
+; X86-NOBMI-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X86-NOBMI-NEXT:    movl %edx, %esi
+; X86-NOBMI-NEXT:    shll %cl, %esi
+; X86-NOBMI-NEXT:    shldl %cl, %edx, %eax
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    movl %esi, %edi
+; X86-NOBMI-NEXT:    jne .LBB37_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %eax, %edi
+; X86-NOBMI-NEXT:  .LBB37_2:
+; X86-NOBMI-NEXT:    movl %edi, %eax
+; X86-NOBMI-NEXT:    shrl %cl, %eax
+; X86-NOBMI-NEXT:    xorl %ebx, %ebx
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    movl $0, %edx
+; X86-NOBMI-NEXT:    jne .LBB37_4
+; X86-NOBMI-NEXT:  # %bb.3:
+; X86-NOBMI-NEXT:    movl %esi, %ebx
+; X86-NOBMI-NEXT:    movl %eax, %edx
+; X86-NOBMI-NEXT:  .LBB37_4:
+; X86-NOBMI-NEXT:    shrdl %cl, %edi, %ebx
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    jne .LBB37_6
+; X86-NOBMI-NEXT:  # %bb.5:
+; X86-NOBMI-NEXT:    movl %ebx, %eax
+; X86-NOBMI-NEXT:  .LBB37_6:
+; X86-NOBMI-NEXT:    popl %esi
+; X86-NOBMI-NEXT:    popl %edi
+; X86-NOBMI-NEXT:    popl %ebx
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1BMI2-LABEL: bzhi64_d3_load_indexzext:
+; X86-BMI1BMI2:       # %bb.0:
+; X86-BMI1BMI2-NEXT:    pushl %edi
+; X86-BMI1BMI2-NEXT:    pushl %esi
+; X86-BMI1BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-BMI1BMI2-NEXT:    movl (%eax), %edx
+; X86-BMI1BMI2-NEXT:    movl 4(%eax), %esi
+; X86-BMI1BMI2-NEXT:    movb $64, %cl
+; X86-BMI1BMI2-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X86-BMI1BMI2-NEXT:    shldl %cl, %edx, %esi
+; X86-BMI1BMI2-NEXT:    shlxl %ecx, %edx, %edi
+; X86-BMI1BMI2-NEXT:    xorl %edx, %edx
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    je .LBB37_2
+; X86-BMI1BMI2-NEXT:  # %bb.1:
+; X86-BMI1BMI2-NEXT:    movl %edi, %esi
+; X86-BMI1BMI2-NEXT:    movl $0, %edi
+; X86-BMI1BMI2-NEXT:  .LBB37_2:
+; X86-BMI1BMI2-NEXT:    shrxl %ecx, %esi, %eax
+; X86-BMI1BMI2-NEXT:    jne .LBB37_4
+; X86-BMI1BMI2-NEXT:  # %bb.3:
+; X86-BMI1BMI2-NEXT:    movl %eax, %edx
+; X86-BMI1BMI2-NEXT:  .LBB37_4:
+; X86-BMI1BMI2-NEXT:    shrdl %cl, %esi, %edi
+; X86-BMI1BMI2-NEXT:    testb $32, %cl
+; X86-BMI1BMI2-NEXT:    jne .LBB37_6
+; X86-BMI1BMI2-NEXT:  # %bb.5:
+; X86-BMI1BMI2-NEXT:    movl %edi, %eax
+; X86-BMI1BMI2-NEXT:  .LBB37_6:
+; X86-BMI1BMI2-NEXT:    popl %esi
+; X86-BMI1BMI2-NEXT:    popl %edi
+; X86-BMI1BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi64_d3_load_indexzext:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movq (%rdi), %rax
+; X64-NOBMI-NEXT:    movb $64, %cl
+; X64-NOBMI-NEXT:    subb %sil, %cl
+; X64-NOBMI-NEXT:    shlq %cl, %rax
+; X64-NOBMI-NEXT:    shrq %cl, %rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1BMI2-LABEL: bzhi64_d3_load_indexzext:
+; X64-BMI1BMI2:       # %bb.0:
+; X64-BMI1BMI2-NEXT:    movb $64, %al
+; X64-BMI1BMI2-NEXT:    subb %sil, %al
+; X64-BMI1BMI2-NEXT:    shlxq %rax, (%rdi), %rcx
+; X64-BMI1BMI2-NEXT:    shrxq %rax, %rcx, %rax
+; X64-BMI1BMI2-NEXT:    retq
+  %val = load i64, i64* %w
+  %numhighbits = sub i8 64, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i64
+  %highbitscleared = shl i64 %val, %sh_prom
+  %masked = lshr i64 %highbitscleared, %sh_prom
+  ret i64 %masked
+}
+
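(Annotation, not part of the commit: the d2/d3 _load variants above differ from d0/d1 only in starting from memory; the point of testing them separately is that BMI2 can fold the load straight into bzhiq's memory operand, as in the bzhiq %rsi, (%rdi), %rax check. A rough C-level sketch, hypothetical name:)

    #include <stdint.h>

    uint64_t extract_lowbits_d_load(const uint64_t *w, uint64_t numlowbits) {
        uint64_t numhighbits = 64 - numlowbits;
        return (*w << numhighbits) >> numhighbits;  /* load folds into bzhiq */
    }
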
+; ---------------------------------------------------------------------------- ;
+; Constant mask
+; ---------------------------------------------------------------------------- ;
+
+; 32-bit
+
+define i32 @bzhi32_constant_mask32(i32 %val) nounwind {
+; X86-LABEL: bzhi32_constant_mask32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: bzhi32_constant_mask32:
+; X64:       # %bb.0:
+; X64-NEXT:    andl $2147483647, %edi # imm = 0x7FFFFFFF
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
+  %masked = and i32 %val, 2147483647
+  ret i32 %masked
+}
+
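(Annotation, not part of the diff: in the constant-mask tests the mask is fully known at compile time, so at 32 bits no bit-manipulation instruction is needed at all; a plain and with an immediate is the expected lowering, as the checks above show. Trivial C sketch, hypothetical name:)

    #include <stdint.h>

    uint32_t mask_low31(uint32_t val) {
        return val & 0x7FFFFFFFu;  /* keep the low 31 bits */
    }
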
+define i32 @bzhi32_constant_mask32_load(i32* %val) nounwind {
+; X86-LABEL: bzhi32_constant_mask32_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-NEXT:    andl (%ecx), %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: bzhi32_constant_mask32_load:
+; X64:       # %bb.0:
+; X64-NEXT:    movl $2147483647, %eax # imm = 0x7FFFFFFF
+; X64-NEXT:    andl (%rdi), %eax
+; X64-NEXT:    retq
+  %val1 = load i32, i32* %val
+  %masked = and i32 %val1, 2147483647
+  ret i32 %masked
+}
+
+define i32 @bzhi32_constant_mask16(i32 %val) nounwind {
+; X86-LABEL: bzhi32_constant_mask16:
+; X86:       # %bb.0:
+; X86-NEXT:    movl $32767, %eax # imm = 0x7FFF
+; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: bzhi32_constant_mask16:
+; X64:       # %bb.0:
+; X64-NEXT:    andl $32767, %edi # imm = 0x7FFF
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
+  %masked = and i32 %val, 32767
+  ret i32 %masked
+}
+
+define i32 @bzhi32_constant_mask16_load(i32* %val) nounwind {
+; X86-LABEL: bzhi32_constant_mask16_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl $32767, %eax # imm = 0x7FFF
+; X86-NEXT:    andl (%ecx), %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: bzhi32_constant_mask16_load:
+; X64:       # %bb.0:
+; X64-NEXT:    movl $32767, %eax # imm = 0x7FFF
+; X64-NEXT:    andl (%rdi), %eax
+; X64-NEXT:    retq
+  %val1 = load i32, i32* %val
+  %masked = and i32 %val1, 32767
+  ret i32 %masked
+}
+
+define i32 @bzhi32_constant_mask8(i32 %val) nounwind {
+; X86-LABEL: bzhi32_constant_mask8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl $127, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: bzhi32_constant_mask8:
+; X64:       # %bb.0:
+; X64-NEXT:    andl $127, %edi
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
+  %masked = and i32 %val, 127
+  ret i32 %masked
+}
+
+define i32 @bzhi32_constant_mask8_load(i32* %val) nounwind {
+; X86-LABEL: bzhi32_constant_mask8_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    andl $127, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: bzhi32_constant_mask8_load:
+; X64:       # %bb.0:
+; X64-NEXT:    movl (%rdi), %eax
+; X64-NEXT:    andl $127, %eax
+; X64-NEXT:    retq
+  %val1 = load i32, i32* %val
+  %masked = and i32 %val1, 127
+  ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @bzhi64_constant_mask64(i64 %val) nounwind {
+; X86-LABEL: bzhi64_constant_mask64:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl $1073741823, %edx # imm = 0x3FFFFFFF
+; X86-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi64_constant_mask64:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movabsq $4611686018427387903, %rax # imm = 0x3FFFFFFFFFFFFFFF
+; X64-NOBMI-NEXT:    andq %rdi, %rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1TBM-LABEL: bzhi64_constant_mask64:
+; X64-BMI1TBM:       # %bb.0:
+; X64-BMI1TBM-NEXT:    bextrq $15872, %rdi, %rax # imm = 0x3E00
+; X64-BMI1TBM-NEXT:    retq
+;
+; X64-BMI1NOTBMBMI2-LABEL: bzhi64_constant_mask64:
+; X64-BMI1NOTBMBMI2:       # %bb.0:
+; X64-BMI1NOTBMBMI2-NEXT:    movb $62, %al
+; X64-BMI1NOTBMBMI2-NEXT:    bzhiq %rax, %rdi, %rax
+; X64-BMI1NOTBMBMI2-NEXT:    retq
+  %masked = and i64 %val, 4611686018427387903
+  ret i64 %masked
+}
+
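(Annotation, not part of the diff: the 62-bit mask 0x3FFFFFFFFFFFFFFF no longer fits a sign-extended 32-bit immediate, which is why the plain x86-64 lowering needs movabsq, while TBM can use bextrq with the bit field encoded in its immediate: bits 7:0 hold the start bit and bits 15:8 the length, so start 0 / length 62 gives (62 << 8) | 0 = 0x3E00 = 15872, the immediate checked above. Without TBM, BMI2's bzhiq takes the count 62 in a register instead. A small C sketch of that control-word layout, hypothetical helper name:)

    #include <stdint.h>

    /* TBM bextrq immediate layout: bits 7:0 = start, bits 15:8 = length. */
    static uint32_t bextr_control(uint32_t start, uint32_t len) {
        return (len << 8) | start;
    }
    /* bextr_control(0, 62) == 0x3E00 == 15872, as in the test above. */
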
+define i64 @bzhi64_constant_mask64_load(i64* %val) nounwind {
+; X86-LABEL: bzhi64_constant_mask64_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl (%ecx), %eax
+; X86-NEXT:    movl $1073741823, %edx # imm = 0x3FFFFFFF
+; X86-NEXT:    andl 4(%ecx), %edx
+; X86-NEXT:    retl
+;
+; X64-NOBMI-LABEL: bzhi64_constant_mask64_load:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movabsq $4611686018427387903, %rax # imm = 0x3FFFFFFFFFFFFFFF
+; X64-NOBMI-NEXT:    andq (%rdi), %rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1TBM-LABEL: bzhi64_constant_mask64_load:
+; X64-BMI1TBM:       # %bb.0:
+; X64-BMI1TBM-NEXT:    bextrq $15872, (%rdi), %rax # imm = 0x3E00
+; X64-BMI1TBM-NEXT:    retq
+;
+; X64-BMI1NOTBMBMI2-LABEL: bzhi64_constant_mask64_load:
+; X64-BMI1NOTBMBMI2:       # %bb.0:
+; X64-BMI1NOTBMBMI2-NEXT:    movb $62, %al
+; X64-BMI1NOTBMBMI2-NEXT:    bzhiq %rax, (%rdi), %rax
+; X64-BMI1NOTBMBMI2-NEXT:    retq
+  %val1 = load i64, i64* %val
+  %masked = and i64 %val1, 4611686018427387903
+  ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask32(i64 %val) nounwind {
+; X86-LABEL: bzhi64_constant_mask32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    retl
+;
+; X64-LABEL: bzhi64_constant_mask32:
+; X64:       # %bb.0:
+; X64-NEXT:    andl $2147483647, %edi # imm = 0x7FFFFFFF
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
+  %masked = and i64 %val, 2147483647
+  ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask32_load(i64* %val) nounwind {
+; X86-LABEL: bzhi64_constant_mask32_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl $2147483647, %eax # imm = 0x7FFFFFFF
+; X86-NEXT:    andl (%ecx), %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    retl
+;
+; X64-LABEL: bzhi64_constant_mask32_load:
+; X64:       # %bb.0:
+; X64-NEXT:    movq (%rdi), %rax
+; X64-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
+; X64-NEXT:    retq
+  %val1 = load i64, i64* %val
+  %masked = and i64 %val1, 2147483647
+  ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask16(i64 %val) nounwind {
+; X86-LABEL: bzhi64_constant_mask16:
+; X86:       # %bb.0:
+; X86-NEXT:    movl $32767, %eax # imm = 0x7FFF
+; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    retl
+;
+; X64-LABEL: bzhi64_constant_mask16:
+; X64:       # %bb.0:
+; X64-NEXT:    andl $32767, %edi # imm = 0x7FFF
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
+  %masked = and i64 %val, 32767
+  ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask16_load(i64* %val) nounwind {
+; X86-LABEL: bzhi64_constant_mask16_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl $32767, %eax # imm = 0x7FFF
+; X86-NEXT:    andl (%ecx), %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    retl
+;
+; X64-LABEL: bzhi64_constant_mask16_load:
+; X64:       # %bb.0:
+; X64-NEXT:    movq (%rdi), %rax
+; X64-NEXT:    andl $32767, %eax # imm = 0x7FFF
+; X64-NEXT:    retq
+  %val1 = load i64, i64* %val
+  %masked = and i64 %val1, 32767
+  ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask8(i64 %val) nounwind {
+; X86-LABEL: bzhi64_constant_mask8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl $127, %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    retl
+;
+; X64-LABEL: bzhi64_constant_mask8:
+; X64:       # %bb.0:
+; X64-NEXT:    andl $127, %edi
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
+  %masked = and i64 %val, 127
+  ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask8_load(i64* %val) nounwind {
+; X86-LABEL: bzhi64_constant_mask8_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    andl $127, %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    retl
+;
+; X64-LABEL: bzhi64_constant_mask8_load:
+; X64:       # %bb.0:
+; X64-NEXT:    movq (%rdi), %rax
+; X64-NEXT:    andl $127, %eax
+; X64-NEXT:    retq
+  %val1 = load i64, i64* %val
+  %masked = and i64 %val1, 127
+  ret i64 %masked
+}