[llvm] [ARM] shouldFoldMaskToVariableShiftPair should be true for scalars up to the biggest legal type (PR #158070)

via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 11 06:19:11 PDT 2025


https://github.com/AZero13 created https://github.com/llvm/llvm-project/pull/158070

For ARM, we want to do this for scalars up to 32 bits, the widest legal type. For wider values the variable shifts have to be expanded inline (or turned into __aeabi_llsl/__aeabi_llsr libcalls on Thumb1), so the code ends up bigger and bloated.
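A minimal sketch of what the override looks like, assuming the usual TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue) hook signature (the exact check in the patch may differ; this would live in ARMISelLowering.cpp with a matching declaration in ARMISelLowering.h):

    bool ARMTargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const {
      EVT VT = Y.getValueType();
      // Illustrative sketch: prefer the shift pair only for scalars no wider
      // than the largest legal type (32 bits on ARM). i64 variable shifts are
      // expanded into long compare/select sequences, or libcalls on Thumb1,
      // as the 64-bit tests below show.
      return VT.isScalarInteger() && VT.getSizeInBits() <= 32;
    }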

From 1cd1f0237cbb022f8e7716ace276cd04b098a9d1 Mon Sep 17 00:00:00 2001
From: AZero13 <gfunni234 at gmail.com>
Date: Thu, 11 Sep 2025 09:13:32 -0400
Subject: [PATCH 1/2] Pre-commit tests (NFC)

---
 llvm/test/CodeGen/ARM/and-mask-variable.ll |   94 +
 llvm/test/CodeGen/ARM/extract-bits.ll      | 4627 ++++++++++++++++++++
 llvm/test/CodeGen/ARM/extract-lowbits.ll   | 2780 ++++++++++++
 3 files changed, 7501 insertions(+)
 create mode 100644 llvm/test/CodeGen/ARM/and-mask-variable.ll
 create mode 100644 llvm/test/CodeGen/ARM/extract-bits.ll
 create mode 100644 llvm/test/CodeGen/ARM/extract-lowbits.ll

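For context, the fold this hook gates rewrites a variable low-bit-clearing mask into a pair of shifts, per the hook's documentation (mask: x & (-1 << y); shifts: x >> y << y). In plain C++ terms (illustrative only, the function names are made up):

    #include <cstdint>

    // Clearing the low y bits of x: the canonical masked form materializes a
    // -1 constant and shifts it; the shift-pair form needs no constant.
    uint32_t clear_low_mask(uint32_t x, unsigned y) {
      return x & (~UINT32_C(0) << y);
    }
    uint32_t clear_low_shift(uint32_t x, unsigned y) {
      return (x >> y) << y;
    }

Both agree for y in [0, 31]; the mask_pair test below exercises exactly this pattern (with the complementary high-bit-clearing form).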
diff --git a/llvm/test/CodeGen/ARM/and-mask-variable.ll b/llvm/test/CodeGen/ARM/and-mask-variable.ll
new file mode 100644
index 0000000000000..0b57fe278bf6e
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/and-mask-variable.ll
@@ -0,0 +1,94 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv7m-eabi  %s -o - | FileCheck %s --check-prefix V7M
+; RUN: llc -mtriple=armv7a-eabi  %s -o -   | FileCheck %s --check-prefix V7A
+; RUN: llc -mtriple=thumbv7a-eabi  %s -o -   | FileCheck %s --check-prefix V7A-T
+; RUN: llc -mtriple=armv6m-eabi  %s -o -   | FileCheck %s --check-prefix V6M
+
+define i32 @mask_pair(i32 %x, i32 %y) {
+; V7M-LABEL: mask_pair:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    mov.w r2, #-1
+; V7M-NEXT:    lsl.w r1, r2, r1
+; V7M-NEXT:    ands r0, r1
+; V7M-NEXT:    bx lr
+;
+; V7A-LABEL: mask_pair:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    mvn r2, #0
+; V7A-NEXT:    and r0, r0, r2, lsl r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: mask_pair:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    mov.w r2, #-1
+; V7A-T-NEXT:    lsl.w r1, r2, r1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: mask_pair:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r2, #0
+; V6M-NEXT:    mvns r2, r2
+; V6M-NEXT:    lsls r2, r1
+; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    bx lr
+  %shl = shl nsw i32 -1, %y
+  %and = and i32 %shl, %x
+  ret i32 %and
+}
+
+define i64 @mask_pair_64(i64 %x, i64 %y) {
+; V7M-LABEL: mask_pair_64:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    mov.w r3, #-1
+; V7M-NEXT:    lsl.w r12, r3, r2
+; V7M-NEXT:    subs r2, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl.w r12, #0
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl r3, r2
+; V7M-NEXT:    and.w r0, r0, r12
+; V7M-NEXT:    ands r1, r3
+; V7M-NEXT:    bx lr
+;
+; V7A-LABEL: mask_pair_64:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    subs r12, r2, #32
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    lsl r2, r3, r2
+; V7A-NEXT:    lslpl r3, r3, r12
+; V7A-NEXT:    movwpl r2, #0
+; V7A-NEXT:    and r1, r3, r1
+; V7A-NEXT:    and r0, r2, r0
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: mask_pair_64:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    mov.w r3, #-1
+; V7A-T-NEXT:    lsl.w r12, r3, r2
+; V7A-T-NEXT:    subs r2, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl.w r12, #0
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl r3, r2
+; V7A-T-NEXT:    and.w r0, r0, r12
+; V7A-T-NEXT:    ands r1, r3
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: mask_pair_64:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r7, lr}
+; V6M-NEXT:    push {r4, r5, r7, lr}
+; V6M-NEXT:    mov r4, r1
+; V6M-NEXT:    mov r5, r0
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    mov r1, r0
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    ands r0, r5
+; V6M-NEXT:    ands r1, r4
+; V6M-NEXT:    pop {r4, r5, r7, pc}
+  %shl = shl nsw i64 -1, %y
+  %and = and i64 %shl, %x
+  ret i64 %and
+}
diff --git a/llvm/test/CodeGen/ARM/extract-bits.ll b/llvm/test/CodeGen/ARM/extract-bits.ll
new file mode 100644
index 0000000000000..86fc0d3d3781a
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/extract-bits.ll
@@ -0,0 +1,4627 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv7m-eabi  %s -o - | FileCheck %s --check-prefix V7M
+; RUN: llc -mtriple=armv7a-eabi  %s -o -   | FileCheck %s --check-prefix V7A
+; RUN: llc -mtriple=thumbv7a-eabi  %s -o -   | FileCheck %s --check-prefix V7A-T
+; RUN: llc -mtriple=armv6m-eabi  %s -o -   | FileCheck %s --check-prefix V6M
+
+; *Please* keep in sync with test/CodeGen/X86/extract-bits.ll
+
+; https://bugs.llvm.org/show_bug.cgi?id=36419
+; https://bugs.llvm.org/show_bug.cgi?id=37603
+; https://bugs.llvm.org/show_bug.cgi?id=37610
+
+; Patterns:
+;   a) (x >> start) &  (1 << nbits) - 1
+;   b) (x >> start) & ~(-1 << nbits)
+;   c) (x >> start) &  (-1 >> (32 - y))
+;   d) (x >> start) << (32 - y) >> (32 - y)
+; are equivalent.
+
+; ---------------------------------------------------------------------------- ;
+; Pattern a. 32-bit
+; ---------------------------------------------------------------------------- ;
+
+define i32 @bextr32_a0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bextr32_a0:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    movs r1, #1
+; CHECK-NEXT:    lsls r1, r2
+; CHECK-NEXT:    subs r1, #1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr32_a0:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    mov r12, #1
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    add r2, r3, r12, lsl r2
+; V7A-NEXT:    and r0, r2, r0, lsr r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr32_a0:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    movs r1, #1
+; V7A-T-NEXT:    lsls r1, r2
+; V7A-T-NEXT:    subs r1, #1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr32_a0:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    movs r1, #1
+; V6M-NEXT:    lsls r1, r2
+; V6M-NEXT:    subs r1, r1, #1
+; V6M-NEXT:    ands r0, r1
+; V6M-NEXT:    bx lr
+  %shifted = lshr i32 %val, %numskipbits
+  %onebit = shl i32 1, %numlowbits
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %mask, %shifted
+  ret i32 %masked
+}
+
+define i32 @bextr32_a0_arithmetic(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bextr32_a0_arithmetic:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    asrs r0, r1
+; CHECK-NEXT:    movs r1, #1
+; CHECK-NEXT:    lsls r1, r2
+; CHECK-NEXT:    subs r1, #1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr32_a0_arithmetic:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    mov r12, #1
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    add r2, r3, r12, lsl r2
+; V7A-NEXT:    and r0, r2, r0, asr r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr32_a0_arithmetic:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    asrs r0, r1
+; V7A-T-NEXT:    movs r1, #1
+; V7A-T-NEXT:    lsls r1, r2
+; V7A-T-NEXT:    subs r1, #1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr32_a0_arithmetic:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    asrs r0, r1
+; V6M-NEXT:    movs r1, #1
+; V6M-NEXT:    lsls r1, r2
+; V6M-NEXT:    subs r1, r1, #1
+; V6M-NEXT:    ands r0, r1
+; V6M-NEXT:    bx lr
+  %shifted = ashr i32 %val, %numskipbits
+  %onebit = shl i32 1, %numlowbits
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %mask, %shifted
+  ret i32 %masked
+}
+
+define i32 @bextr32_a1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+; CHECK-LABEL: bextr32_a1_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    movs r1, #1
+; CHECK-NEXT:    lsls r1, r2
+; CHECK-NEXT:    subs r1, #1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr32_a1_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    mov r12, #1
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    add r2, r3, r12, lsl r2
+; V7A-NEXT:    and r0, r2, r0, lsr r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr32_a1_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    movs r1, #1
+; V7A-T-NEXT:    lsls r1, r2
+; V7A-T-NEXT:    subs r1, #1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr32_a1_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    movs r1, #1
+; V6M-NEXT:    lsls r1, r2
+; V6M-NEXT:    subs r1, r1, #1
+; V6M-NEXT:    ands r0, r1
+; V6M-NEXT:    bx lr
+  %skip = zext i8 %numskipbits to i32
+  %shifted = lshr i32 %val, %skip
+  %conv = zext i8 %numlowbits to i32
+  %onebit = shl i32 1, %conv
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %mask, %shifted
+  ret i32 %masked
+}
+
+define i32 @bextr32_a2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bextr32_a2_load:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    movs r1, #1
+; CHECK-NEXT:    lsls r1, r2
+; CHECK-NEXT:    subs r1, #1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr32_a2_load:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ldr r0, [r0]
+; V7A-NEXT:    mov r12, #1
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    add r2, r3, r12, lsl r2
+; V7A-NEXT:    and r0, r2, r0, lsr r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr32_a2_load:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    ldr r0, [r0]
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    movs r1, #1
+; V7A-T-NEXT:    lsls r1, r2
+; V7A-T-NEXT:    subs r1, #1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr32_a2_load:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    ldr r3, [r0]
+; V6M-NEXT:    lsrs r3, r1
+; V6M-NEXT:    movs r0, #1
+; V6M-NEXT:    lsls r0, r2
+; V6M-NEXT:    subs r0, r0, #1
+; V6M-NEXT:    ands r0, r3
+; V6M-NEXT:    bx lr
+  %val = load i32, ptr %w
+  %shifted = lshr i32 %val, %numskipbits
+  %onebit = shl i32 1, %numlowbits
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %mask, %shifted
+  ret i32 %masked
+}
+
+define i32 @bextr32_a3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+; CHECK-LABEL: bextr32_a3_load_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    movs r1, #1
+; CHECK-NEXT:    lsls r1, r2
+; CHECK-NEXT:    subs r1, #1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr32_a3_load_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ldr r0, [r0]
+; V7A-NEXT:    mov r12, #1
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    add r2, r3, r12, lsl r2
+; V7A-NEXT:    and r0, r2, r0, lsr r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr32_a3_load_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    ldr r0, [r0]
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    movs r1, #1
+; V7A-T-NEXT:    lsls r1, r2
+; V7A-T-NEXT:    subs r1, #1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr32_a3_load_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    ldr r3, [r0]
+; V6M-NEXT:    lsrs r3, r1
+; V6M-NEXT:    movs r0, #1
+; V6M-NEXT:    lsls r0, r2
+; V6M-NEXT:    subs r0, r0, #1
+; V6M-NEXT:    ands r0, r3
+; V6M-NEXT:    bx lr
+  %val = load i32, ptr %w
+  %skip = zext i8 %numskipbits to i32
+  %shifted = lshr i32 %val, %skip
+  %conv = zext i8 %numlowbits to i32
+  %onebit = shl i32 1, %conv
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %mask, %shifted
+  ret i32 %masked
+}
+
+define i32 @bextr32_a4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bextr32_a4_commutative:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    movs r1, #1
+; CHECK-NEXT:    lsls r1, r2
+; CHECK-NEXT:    subs r1, #1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr32_a4_commutative:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    mov r12, #1
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    add r2, r3, r12, lsl r2
+; V7A-NEXT:    and r0, r2, r0, lsr r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr32_a4_commutative:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    movs r1, #1
+; V7A-T-NEXT:    lsls r1, r2
+; V7A-T-NEXT:    subs r1, #1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr32_a4_commutative:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    movs r1, #1
+; V6M-NEXT:    lsls r1, r2
+; V6M-NEXT:    subs r1, r1, #1
+; V6M-NEXT:    ands r0, r1
+; V6M-NEXT:    bx lr
+  %shifted = lshr i32 %val, %numskipbits
+  %onebit = shl i32 1, %numlowbits
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %shifted, %mask ; swapped order
+  ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @bextr64_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_a0:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    ldr.w r12, [sp, #8]
+; CHECK-NEXT:    mov.w lr, #1
+; CHECK-NEXT:    lsrs r0, r2
+; CHECK-NEXT:    rsb.w r4, r12, #32
+; CHECK-NEXT:    subs.w r3, r12, #32
+; CHECK-NEXT:    lsr.w r4, lr, r4
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r4, lr, r3
+; CHECK-NEXT:    lsl.w r3, lr, r12
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r3, #0
+; CHECK-NEXT:    subs r3, #1
+; CHECK-NEXT:    sbc r12, r4, #0
+; CHECK-NEXT:    rsb.w r4, r2, #32
+; CHECK-NEXT:    lsl.w r4, r1, r4
+; CHECK-NEXT:    orrs r0, r4
+; CHECK-NEXT:    subs.w r4, r2, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r1, r4
+; CHECK-NEXT:    lsr.w r1, r1, r2
+; CHECK-NEXT:    and.w r0, r0, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    and.w r1, r1, r12
+; CHECK-NEXT:    pop {r4, pc}
+;
+; V7A-LABEL: bextr64_a0:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r4, lr}
+; V7A-NEXT:    push {r4, lr}
+; V7A-NEXT:    ldr r12, [sp, #8]
+; V7A-NEXT:    mov lr, #1
+; V7A-NEXT:    lsr r0, r0, r2
+; V7A-NEXT:    rsb r3, r12, #32
+; V7A-NEXT:    subs r4, r12, #32
+; V7A-NEXT:    lsr r3, lr, r3
+; V7A-NEXT:    lslpl r3, lr, r4
+; V7A-NEXT:    lsl r4, lr, r12
+; V7A-NEXT:    movwpl r4, #0
+; V7A-NEXT:    subs r4, r4, #1
+; V7A-NEXT:    sbc r12, r3, #0
+; V7A-NEXT:    rsb r3, r2, #32
+; V7A-NEXT:    orr r0, r0, r1, lsl r3
+; V7A-NEXT:    subs r3, r2, #32
+; V7A-NEXT:    lsrpl r0, r1, r3
+; V7A-NEXT:    lsr r1, r1, r2
+; V7A-NEXT:    movwpl r1, #0
+; V7A-NEXT:    and r0, r4, r0
+; V7A-NEXT:    and r1, r12, r1
+; V7A-NEXT:    pop {r4, pc}
+;
+; V7A-T-LABEL: bextr64_a0:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r4, lr}
+; V7A-T-NEXT:    push {r4, lr}
+; V7A-T-NEXT:    ldr.w r12, [sp, #8]
+; V7A-T-NEXT:    mov.w lr, #1
+; V7A-T-NEXT:    lsrs r0, r2
+; V7A-T-NEXT:    rsb.w r4, r12, #32
+; V7A-T-NEXT:    subs.w r3, r12, #32
+; V7A-T-NEXT:    lsr.w r4, lr, r4
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl.w r4, lr, r3
+; V7A-T-NEXT:    lsl.w r3, lr, r12
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r3, #0
+; V7A-T-NEXT:    subs r3, #1
+; V7A-T-NEXT:    sbc r12, r4, #0
+; V7A-T-NEXT:    rsb.w r4, r2, #32
+; V7A-T-NEXT:    lsl.w r4, r1, r4
+; V7A-T-NEXT:    orrs r0, r4
+; V7A-T-NEXT:    subs.w r4, r2, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r1, r4
+; V7A-T-NEXT:    lsr.w r1, r1, r2
+; V7A-T-NEXT:    and.w r0, r0, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r1, #0
+; V7A-T-NEXT:    and.w r1, r1, r12
+; V7A-T-NEXT:    pop {r4, pc}
+;
+; V6M-LABEL: bextr64_a0:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r6, r7, lr}
+; V6M-NEXT:    push {r4, r5, r6, r7, lr}
+; V6M-NEXT:    .pad #12
+; V6M-NEXT:    sub sp, #12
+; V6M-NEXT:    str r2, [sp, #8] @ 4-byte Spill
+; V6M-NEXT:    str r1, [sp, #4] @ 4-byte Spill
+; V6M-NEXT:    mov r6, r0
+; V6M-NEXT:    movs r0, #1
+; V6M-NEXT:    movs r7, #0
+; V6M-NEXT:    ldr r2, [sp, #32]
+; V6M-NEXT:    mov r1, r7
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    mov r4, r1
+; V6M-NEXT:    subs r5, r0, #1
+; V6M-NEXT:    sbcs r4, r7
+; V6M-NEXT:    mov r0, r6
+; V6M-NEXT:    ldr r1, [sp, #4] @ 4-byte Reload
+; V6M-NEXT:    ldr r2, [sp, #8] @ 4-byte Reload
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    ands r0, r5
+; V6M-NEXT:    ands r1, r4
+; V6M-NEXT:    add sp, #12
+; V6M-NEXT:    pop {r4, r5, r6, r7, pc}
+  %shifted = lshr i64 %val, %numskipbits
+  %onebit = shl i64 1, %numlowbits
+  %mask = add nsw i64 %onebit, -1
+  %masked = and i64 %mask, %shifted
+  ret i64 %masked
+}
+
+define i64 @bextr64_a0_arithmetic(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_a0_arithmetic:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    ldr.w r12, [sp, #8]
+; CHECK-NEXT:    mov.w lr, #1
+; CHECK-NEXT:    lsrs r0, r2
+; CHECK-NEXT:    rsb.w r4, r12, #32
+; CHECK-NEXT:    subs.w r3, r12, #32
+; CHECK-NEXT:    lsr.w r4, lr, r4
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r4, lr, r3
+; CHECK-NEXT:    lsl.w r3, lr, r12
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r3, #0
+; CHECK-NEXT:    subs r3, #1
+; CHECK-NEXT:    sbc r12, r4, #0
+; CHECK-NEXT:    rsb.w r4, r2, #32
+; CHECK-NEXT:    lsl.w r4, r1, r4
+; CHECK-NEXT:    orrs r0, r4
+; CHECK-NEXT:    subs.w r4, r2, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    asrpl.w r0, r1, r4
+; CHECK-NEXT:    asr.w r2, r1, r2
+; CHECK-NEXT:    and.w r0, r0, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    asrpl r2, r1, #31
+; CHECK-NEXT:    and.w r1, r12, r2
+; CHECK-NEXT:    pop {r4, pc}
+;
+; V7A-LABEL: bextr64_a0_arithmetic:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r4, lr}
+; V7A-NEXT:    push {r4, lr}
+; V7A-NEXT:    ldr r12, [sp, #8]
+; V7A-NEXT:    mov lr, #1
+; V7A-NEXT:    lsr r0, r0, r2
+; V7A-NEXT:    rsb r3, r12, #32
+; V7A-NEXT:    subs r4, r12, #32
+; V7A-NEXT:    lsr r3, lr, r3
+; V7A-NEXT:    lslpl r3, lr, r4
+; V7A-NEXT:    lsl r4, lr, r12
+; V7A-NEXT:    movwpl r4, #0
+; V7A-NEXT:    subs r4, r4, #1
+; V7A-NEXT:    sbc r12, r3, #0
+; V7A-NEXT:    rsb r3, r2, #32
+; V7A-NEXT:    orr r0, r0, r1, lsl r3
+; V7A-NEXT:    subs r3, r2, #32
+; V7A-NEXT:    asr r2, r1, r2
+; V7A-NEXT:    asrpl r0, r1, r3
+; V7A-NEXT:    asrpl r2, r1, #31
+; V7A-NEXT:    and r0, r4, r0
+; V7A-NEXT:    and r1, r12, r2
+; V7A-NEXT:    pop {r4, pc}
+;
+; V7A-T-LABEL: bextr64_a0_arithmetic:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r4, lr}
+; V7A-T-NEXT:    push {r4, lr}
+; V7A-T-NEXT:    ldr.w r12, [sp, #8]
+; V7A-T-NEXT:    mov.w lr, #1
+; V7A-T-NEXT:    lsrs r0, r2
+; V7A-T-NEXT:    rsb.w r4, r12, #32
+; V7A-T-NEXT:    subs.w r3, r12, #32
+; V7A-T-NEXT:    lsr.w r4, lr, r4
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl.w r4, lr, r3
+; V7A-T-NEXT:    lsl.w r3, lr, r12
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r3, #0
+; V7A-T-NEXT:    subs r3, #1
+; V7A-T-NEXT:    sbc r12, r4, #0
+; V7A-T-NEXT:    rsb.w r4, r2, #32
+; V7A-T-NEXT:    lsl.w r4, r1, r4
+; V7A-T-NEXT:    orrs r0, r4
+; V7A-T-NEXT:    subs.w r4, r2, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    asrpl.w r0, r1, r4
+; V7A-T-NEXT:    asr.w r2, r1, r2
+; V7A-T-NEXT:    and.w r0, r0, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    asrpl r2, r1, #31
+; V7A-T-NEXT:    and.w r1, r12, r2
+; V7A-T-NEXT:    pop {r4, pc}
+;
+; V6M-LABEL: bextr64_a0_arithmetic:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r6, r7, lr}
+; V6M-NEXT:    push {r4, r5, r6, r7, lr}
+; V6M-NEXT:    .pad #12
+; V6M-NEXT:    sub sp, #12
+; V6M-NEXT:    str r2, [sp, #8] @ 4-byte Spill
+; V6M-NEXT:    str r1, [sp, #4] @ 4-byte Spill
+; V6M-NEXT:    mov r6, r0
+; V6M-NEXT:    movs r0, #1
+; V6M-NEXT:    movs r7, #0
+; V6M-NEXT:    ldr r2, [sp, #32]
+; V6M-NEXT:    mov r1, r7
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    mov r4, r1
+; V6M-NEXT:    subs r5, r0, #1
+; V6M-NEXT:    sbcs r4, r7
+; V6M-NEXT:    mov r0, r6
+; V6M-NEXT:    ldr r1, [sp, #4] @ 4-byte Reload
+; V6M-NEXT:    ldr r2, [sp, #8] @ 4-byte Reload
+; V6M-NEXT:    bl __aeabi_lasr
+; V6M-NEXT:    ands r0, r5
+; V6M-NEXT:    ands r1, r4
+; V6M-NEXT:    add sp, #12
+; V6M-NEXT:    pop {r4, r5, r6, r7, pc}
+  %shifted = ashr i64 %val, %numskipbits
+  %onebit = shl i64 1, %numlowbits
+  %mask = add nsw i64 %onebit, -1
+  %masked = and i64 %mask, %shifted
+  ret i64 %masked
+}
+
+define i64 @bextr64_a1_indexzext(i64 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_a1_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    rsb.w r4, r3, #32
+; CHECK-NEXT:    mov.w lr, #1
+; CHECK-NEXT:    subs.w r12, r3, #32
+; CHECK-NEXT:    lsl.w r3, lr, r3
+; CHECK-NEXT:    lsr.w r4, lr, r4
+; CHECK-NEXT:    lsr.w r0, r0, r2
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r4, lr, r12
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r3, #0
+; CHECK-NEXT:    subs r3, #1
+; CHECK-NEXT:    sbc r12, r4, #0
+; CHECK-NEXT:    rsb.w r4, r2, #32
+; CHECK-NEXT:    lsl.w r4, r1, r4
+; CHECK-NEXT:    orrs r0, r4
+; CHECK-NEXT:    subs.w r4, r2, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r1, r4
+; CHECK-NEXT:    lsr.w r1, r1, r2
+; CHECK-NEXT:    and.w r0, r0, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    and.w r1, r1, r12
+; CHECK-NEXT:    pop {r4, pc}
+;
+; V7A-LABEL: bextr64_a1_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r4, lr}
+; V7A-NEXT:    push {r4, lr}
+; V7A-NEXT:    rsb r12, r3, #32
+; V7A-NEXT:    mov lr, #1
+; V7A-NEXT:    subs r4, r3, #32
+; V7A-NEXT:    lsl r3, lr, r3
+; V7A-NEXT:    lsr r12, lr, r12
+; V7A-NEXT:    movwpl r3, #0
+; V7A-NEXT:    lslpl r12, lr, r4
+; V7A-NEXT:    rsb r4, r2, #32
+; V7A-NEXT:    lsr r0, r0, r2
+; V7A-NEXT:    subs r3, r3, #1
+; V7A-NEXT:    sbc r12, r12, #0
+; V7A-NEXT:    orr r0, r0, r1, lsl r4
+; V7A-NEXT:    subs r4, r2, #32
+; V7A-NEXT:    lsrpl r0, r1, r4
+; V7A-NEXT:    lsr r1, r1, r2
+; V7A-NEXT:    movwpl r1, #0
+; V7A-NEXT:    and r0, r3, r0
+; V7A-NEXT:    and r1, r12, r1
+; V7A-NEXT:    pop {r4, pc}
+;
+; V7A-T-LABEL: bextr64_a1_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r4, lr}
+; V7A-T-NEXT:    push {r4, lr}
+; V7A-T-NEXT:    rsb.w r4, r3, #32
+; V7A-T-NEXT:    mov.w lr, #1
+; V7A-T-NEXT:    subs.w r12, r3, #32
+; V7A-T-NEXT:    lsl.w r3, lr, r3
+; V7A-T-NEXT:    lsr.w r4, lr, r4
+; V7A-T-NEXT:    lsr.w r0, r0, r2
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl.w r4, lr, r12
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r3, #0
+; V7A-T-NEXT:    subs r3, #1
+; V7A-T-NEXT:    sbc r12, r4, #0
+; V7A-T-NEXT:    rsb.w r4, r2, #32
+; V7A-T-NEXT:    lsl.w r4, r1, r4
+; V7A-T-NEXT:    orrs r0, r4
+; V7A-T-NEXT:    subs.w r4, r2, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r1, r4
+; V7A-T-NEXT:    lsr.w r1, r1, r2
+; V7A-T-NEXT:    and.w r0, r0, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r1, #0
+; V7A-T-NEXT:    and.w r1, r1, r12
+; V7A-T-NEXT:    pop {r4, pc}
+;
+; V6M-LABEL: bextr64_a1_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r6, r7, lr}
+; V6M-NEXT:    push {r4, r5, r6, r7, lr}
+; V6M-NEXT:    .pad #12
+; V6M-NEXT:    sub sp, #12
+; V6M-NEXT:    str r2, [sp, #8] @ 4-byte Spill
+; V6M-NEXT:    str r1, [sp, #4] @ 4-byte Spill
+; V6M-NEXT:    mov r6, r0
+; V6M-NEXT:    movs r0, #1
+; V6M-NEXT:    movs r7, #0
+; V6M-NEXT:    mov r1, r7
+; V6M-NEXT:    mov r2, r3
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    mov r4, r1
+; V6M-NEXT:    subs r5, r0, #1
+; V6M-NEXT:    sbcs r4, r7
+; V6M-NEXT:    mov r0, r6
+; V6M-NEXT:    ldr r1, [sp, #4] @ 4-byte Reload
+; V6M-NEXT:    ldr r2, [sp, #8] @ 4-byte Reload
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    ands r0, r5
+; V6M-NEXT:    ands r1, r4
+; V6M-NEXT:    add sp, #12
+; V6M-NEXT:    pop {r4, r5, r6, r7, pc}
+  %skip = zext i8 %numskipbits to i64
+  %shifted = lshr i64 %val, %skip
+  %conv = zext i8 %numlowbits to i64
+  %onebit = shl i64 1, %conv
+  %mask = add nsw i64 %onebit, -1
+  %masked = and i64 %mask, %shifted
+  ret i64 %masked
+}
+
+define i64 @bextr64_a2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_a2_load:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    ldr.w r12, [sp, #8]
+; CHECK-NEXT:    mov.w lr, #1
+; CHECK-NEXT:    rsb.w r1, r12, #32
+; CHECK-NEXT:    subs.w r3, r12, #32
+; CHECK-NEXT:    lsr.w r1, lr, r1
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r1, lr, r3
+; CHECK-NEXT:    lsl.w r3, lr, r12
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r3, #0
+; CHECK-NEXT:    subs.w lr, r3, #1
+; CHECK-NEXT:    ldrd r0, r3, [r0]
+; CHECK-NEXT:    sbc r12, r1, #0
+; CHECK-NEXT:    rsb.w r1, r2, #32
+; CHECK-NEXT:    lsl.w r1, r3, r1
+; CHECK-NEXT:    lsrs r0, r2
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    subs.w r1, r2, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r3, r1
+; CHECK-NEXT:    lsr.w r1, r3, r2
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    and.w r0, r0, lr
+; CHECK-NEXT:    and.w r1, r1, r12
+; CHECK-NEXT:    pop {r7, pc}
+;
+; V7A-LABEL: bextr64_a2_load:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r4, r5, r6, lr}
+; V7A-NEXT:    push {r4, r5, r6, lr}
+; V7A-NEXT:    ldr r1, [sp, #16]
+; V7A-NEXT:    mov r3, #1
+; V7A-NEXT:    ldr r6, [r0]
+; V7A-NEXT:    ldr r5, [r0, #4]
+; V7A-NEXT:    rsb r0, r1, #32
+; V7A-NEXT:    subs r4, r1, #32
+; V7A-NEXT:    lsl r1, r3, r1
+; V7A-NEXT:    lsr r0, r3, r0
+; V7A-NEXT:    movwpl r1, #0
+; V7A-NEXT:    lslpl r0, r3, r4
+; V7A-NEXT:    subs r1, r1, #1
+; V7A-NEXT:    sbc r3, r0, #0
+; V7A-NEXT:    lsr r0, r6, r2
+; V7A-NEXT:    rsb r6, r2, #32
+; V7A-NEXT:    orr r0, r0, r5, lsl r6
+; V7A-NEXT:    subs r6, r2, #32
+; V7A-NEXT:    lsrpl r0, r5, r6
+; V7A-NEXT:    and r0, r1, r0
+; V7A-NEXT:    lsr r1, r5, r2
+; V7A-NEXT:    movwpl r1, #0
+; V7A-NEXT:    and r1, r3, r1
+; V7A-NEXT:    pop {r4, r5, r6, pc}
+;
+; V7A-T-LABEL: bextr64_a2_load:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r4, lr}
+; V7A-T-NEXT:    push {r4, lr}
+; V7A-T-NEXT:    ldr.w r12, [sp, #8]
+; V7A-T-NEXT:    movs r3, #1
+; V7A-T-NEXT:    ldrd lr, r1, [r0]
+; V7A-T-NEXT:    rsb.w r4, r12, #32
+; V7A-T-NEXT:    subs.w r0, r12, #32
+; V7A-T-NEXT:    lsr.w r4, r3, r4
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl.w r4, r3, r0
+; V7A-T-NEXT:    lsl.w r0, r3, r12
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r0, #0
+; V7A-T-NEXT:    lsr.w r3, lr, r2
+; V7A-T-NEXT:    subs r0, #1
+; V7A-T-NEXT:    sbc r12, r4, #0
+; V7A-T-NEXT:    rsb.w r4, r2, #32
+; V7A-T-NEXT:    lsl.w r4, r1, r4
+; V7A-T-NEXT:    orrs r3, r4
+; V7A-T-NEXT:    subs.w r4, r2, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r3, r1, r4
+; V7A-T-NEXT:    lsr.w r1, r1, r2
+; V7A-T-NEXT:    and.w r0, r0, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r1, #0
+; V7A-T-NEXT:    and.w r1, r1, r12
+; V7A-T-NEXT:    pop {r4, pc}
+;
+; V6M-LABEL: bextr64_a2_load:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r6, r7, lr}
+; V6M-NEXT:    push {r4, r5, r6, r7, lr}
+; V6M-NEXT:    .pad #4
+; V6M-NEXT:    sub sp, #4
+; V6M-NEXT:    str r2, [sp] @ 4-byte Spill
+; V6M-NEXT:    mov r5, r0
+; V6M-NEXT:    movs r0, #1
+; V6M-NEXT:    movs r7, #0
+; V6M-NEXT:    ldr r2, [sp, #24]
+; V6M-NEXT:    mov r1, r7
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    mov r6, r1
+; V6M-NEXT:    subs r4, r0, #1
+; V6M-NEXT:    sbcs r6, r7
+; V6M-NEXT:    ldm r5!, {r0, r1}
+; V6M-NEXT:    ldr r2, [sp] @ 4-byte Reload
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    ands r0, r4
+; V6M-NEXT:    ands r1, r6
+; V6M-NEXT:    add sp, #4
+; V6M-NEXT:    pop {r4, r5, r6, r7, pc}
+  %val = load i64, ptr %w
+  %shifted = lshr i64 %val, %numskipbits
+  %onebit = shl i64 1, %numlowbits
+  %mask = add nsw i64 %onebit, -1
+  %masked = and i64 %mask, %shifted
+  ret i64 %masked
+}
+
+define i64 @bextr64_a3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_a3_load_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    rsb.w r3, r2, #32
+; CHECK-NEXT:    mov.w r12, #1
+; CHECK-NEXT:    subs.w lr, r2, #32
+; CHECK-NEXT:    lsl.w r2, r12, r2
+; CHECK-NEXT:    lsr.w r3, r12, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r3, r12, lr
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r2, #0
+; CHECK-NEXT:    subs.w lr, r2, #1
+; CHECK-NEXT:    ldrd r0, r2, [r0]
+; CHECK-NEXT:    sbc r12, r3, #0
+; CHECK-NEXT:    rsb.w r3, r1, #32
+; CHECK-NEXT:    lsl.w r3, r2, r3
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    orrs r0, r3
+; CHECK-NEXT:    subs.w r3, r1, #32
+; CHECK-NEXT:    lsr.w r1, r2, r1
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r2, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    and.w r0, r0, lr
+; CHECK-NEXT:    and.w r1, r1, r12
+; CHECK-NEXT:    pop {r7, pc}
+;
+; V7A-LABEL: bextr64_a3_load_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r4, r5, r6, lr}
+; V7A-NEXT:    push {r4, r5, r6, lr}
+; V7A-NEXT:    ldr r6, [r0]
+; V7A-NEXT:    mov r3, #1
+; V7A-NEXT:    ldr r5, [r0, #4]
+; V7A-NEXT:    rsb r0, r2, #32
+; V7A-NEXT:    subs r4, r2, #32
+; V7A-NEXT:    lsl r2, r3, r2
+; V7A-NEXT:    lsr r0, r3, r0
+; V7A-NEXT:    movwpl r2, #0
+; V7A-NEXT:    lslpl r0, r3, r4
+; V7A-NEXT:    subs r3, r2, #1
+; V7A-NEXT:    sbc r0, r0, #0
+; V7A-NEXT:    lsr r2, r5, r1
+; V7A-NEXT:    subs r4, r1, #32
+; V7A-NEXT:    movwpl r2, #0
+; V7A-NEXT:    and r2, r0, r2
+; V7A-NEXT:    lsr r0, r6, r1
+; V7A-NEXT:    rsb r1, r1, #32
+; V7A-NEXT:    orr r0, r0, r5, lsl r1
+; V7A-NEXT:    mov r1, r2
+; V7A-NEXT:    lsrpl r0, r5, r4
+; V7A-NEXT:    and r0, r3, r0
+; V7A-NEXT:    pop {r4, r5, r6, pc}
+;
+; V7A-T-LABEL: bextr64_a3_load_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r4, lr}
+; V7A-T-NEXT:    push {r4, lr}
+; V7A-T-NEXT:    rsb.w r4, r2, #32
+; V7A-T-NEXT:    mov.w lr, #1
+; V7A-T-NEXT:    subs.w r3, r2, #32
+; V7A-T-NEXT:    lsl.w r2, lr, r2
+; V7A-T-NEXT:    lsr.w r4, lr, r4
+; V7A-T-NEXT:    ldrd r12, r0, [r0]
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl.w r4, lr, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r2, #0
+; V7A-T-NEXT:    subs.w lr, r2, #1
+; V7A-T-NEXT:    sbc r2, r4, #0
+; V7A-T-NEXT:    lsr.w r4, r0, r1
+; V7A-T-NEXT:    subs.w r3, r1, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r4, #0
+; V7A-T-NEXT:    and.w r2, r2, r4
+; V7A-T-NEXT:    rsb.w r4, r1, #32
+; V7A-T-NEXT:    lsr.w r1, r12, r1
+; V7A-T-NEXT:    lsl.w r4, r0, r4
+; V7A-T-NEXT:    orr.w r1, r1, r4
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r1, r0, r3
+; V7A-T-NEXT:    and.w r0, lr, r1
+; V7A-T-NEXT:    mov r1, r2
+; V7A-T-NEXT:    pop {r4, pc}
+;
+; V6M-LABEL: bextr64_a3_load_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r6, r7, lr}
+; V6M-NEXT:    push {r4, r5, r6, r7, lr}
+; V6M-NEXT:    .pad #4
+; V6M-NEXT:    sub sp, #4
+; V6M-NEXT:    str r1, [sp] @ 4-byte Spill
+; V6M-NEXT:    mov r6, r0
+; V6M-NEXT:    movs r0, #1
+; V6M-NEXT:    movs r7, #0
+; V6M-NEXT:    mov r1, r7
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    mov r5, r1
+; V6M-NEXT:    subs r4, r0, #1
+; V6M-NEXT:    sbcs r5, r7
+; V6M-NEXT:    ldm r6!, {r0, r1}
+; V6M-NEXT:    ldr r2, [sp] @ 4-byte Reload
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    ands r0, r4
+; V6M-NEXT:    ands r1, r5
+; V6M-NEXT:    add sp, #4
+; V6M-NEXT:    pop {r4, r5, r6, r7, pc}
+  %val = load i64, ptr %w
+  %skip = zext i8 %numskipbits to i64
+  %shifted = lshr i64 %val, %skip
+  %conv = zext i8 %numlowbits to i64
+  %onebit = shl i64 1, %conv
+  %mask = add nsw i64 %onebit, -1
+  %masked = and i64 %mask, %shifted
+  ret i64 %masked
+}
+
+define i64 @bextr64_a4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_a4_commutative:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    ldr.w r12, [sp, #8]
+; CHECK-NEXT:    mov.w lr, #1
+; CHECK-NEXT:    lsrs r0, r2
+; CHECK-NEXT:    rsb.w r4, r12, #32
+; CHECK-NEXT:    subs.w r3, r12, #32
+; CHECK-NEXT:    lsr.w r4, lr, r4
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r4, lr, r3
+; CHECK-NEXT:    lsl.w r3, lr, r12
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r3, #0
+; CHECK-NEXT:    subs r3, #1
+; CHECK-NEXT:    sbc r12, r4, #0
+; CHECK-NEXT:    rsb.w r4, r2, #32
+; CHECK-NEXT:    lsl.w r4, r1, r4
+; CHECK-NEXT:    orrs r0, r4
+; CHECK-NEXT:    subs.w r4, r2, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r1, r4
+; CHECK-NEXT:    lsr.w r1, r1, r2
+; CHECK-NEXT:    and.w r0, r0, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    and.w r1, r1, r12
+; CHECK-NEXT:    pop {r4, pc}
+;
+; V7A-LABEL: bextr64_a4_commutative:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r4, lr}
+; V7A-NEXT:    push {r4, lr}
+; V7A-NEXT:    ldr r12, [sp, #8]
+; V7A-NEXT:    mov lr, #1
+; V7A-NEXT:    lsr r0, r0, r2
+; V7A-NEXT:    rsb r3, r12, #32
+; V7A-NEXT:    subs r4, r12, #32
+; V7A-NEXT:    lsr r3, lr, r3
+; V7A-NEXT:    lslpl r3, lr, r4
+; V7A-NEXT:    lsl r4, lr, r12
+; V7A-NEXT:    movwpl r4, #0
+; V7A-NEXT:    subs r4, r4, #1
+; V7A-NEXT:    sbc r12, r3, #0
+; V7A-NEXT:    rsb r3, r2, #32
+; V7A-NEXT:    orr r0, r0, r1, lsl r3
+; V7A-NEXT:    subs r3, r2, #32
+; V7A-NEXT:    lsrpl r0, r1, r3
+; V7A-NEXT:    lsr r1, r1, r2
+; V7A-NEXT:    movwpl r1, #0
+; V7A-NEXT:    and r0, r0, r4
+; V7A-NEXT:    and r1, r1, r12
+; V7A-NEXT:    pop {r4, pc}
+;
+; V7A-T-LABEL: bextr64_a4_commutative:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r4, lr}
+; V7A-T-NEXT:    push {r4, lr}
+; V7A-T-NEXT:    ldr.w r12, [sp, #8]
+; V7A-T-NEXT:    mov.w lr, #1
+; V7A-T-NEXT:    lsrs r0, r2
+; V7A-T-NEXT:    rsb.w r4, r12, #32
+; V7A-T-NEXT:    subs.w r3, r12, #32
+; V7A-T-NEXT:    lsr.w r4, lr, r4
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl.w r4, lr, r3
+; V7A-T-NEXT:    lsl.w r3, lr, r12
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r3, #0
+; V7A-T-NEXT:    subs r3, #1
+; V7A-T-NEXT:    sbc r12, r4, #0
+; V7A-T-NEXT:    rsb.w r4, r2, #32
+; V7A-T-NEXT:    lsl.w r4, r1, r4
+; V7A-T-NEXT:    orrs r0, r4
+; V7A-T-NEXT:    subs.w r4, r2, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r1, r4
+; V7A-T-NEXT:    lsr.w r1, r1, r2
+; V7A-T-NEXT:    and.w r0, r0, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r1, #0
+; V7A-T-NEXT:    and.w r1, r1, r12
+; V7A-T-NEXT:    pop {r4, pc}
+;
+; V6M-LABEL: bextr64_a4_commutative:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r6, r7, lr}
+; V6M-NEXT:    push {r4, r5, r6, r7, lr}
+; V6M-NEXT:    .pad #12
+; V6M-NEXT:    sub sp, #12
+; V6M-NEXT:    str r2, [sp, #8] @ 4-byte Spill
+; V6M-NEXT:    str r1, [sp, #4] @ 4-byte Spill
+; V6M-NEXT:    mov r6, r0
+; V6M-NEXT:    movs r0, #1
+; V6M-NEXT:    movs r7, #0
+; V6M-NEXT:    ldr r2, [sp, #32]
+; V6M-NEXT:    mov r1, r7
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    mov r4, r1
+; V6M-NEXT:    subs r5, r0, #1
+; V6M-NEXT:    sbcs r4, r7
+; V6M-NEXT:    mov r0, r6
+; V6M-NEXT:    ldr r1, [sp, #4] @ 4-byte Reload
+; V6M-NEXT:    ldr r2, [sp, #8] @ 4-byte Reload
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    ands r0, r5
+; V6M-NEXT:    ands r1, r4
+; V6M-NEXT:    add sp, #12
+; V6M-NEXT:    pop {r4, r5, r6, r7, pc}
+  %shifted = lshr i64 %val, %numskipbits
+  %onebit = shl i64 1, %numlowbits
+  %mask = add nsw i64 %onebit, -1
+  %masked = and i64 %shifted, %mask ; swapped order
+  ret i64 %masked
+}
+
+; 64-bit, but with 32-bit output
+
+; Everything done in 64-bit, truncation happens last.
+define i32 @bextr64_32_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_32_a0:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    rsb.w r3, r2, #32
+; CHECK-NEXT:    lsrs r0, r2
+; CHECK-NEXT:    subs r2, #32
+; CHECK-NEXT:    lsl.w r3, r1, r3
+; CHECK-NEXT:    orr.w r0, r0, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r1, r2
+; CHECK-NEXT:    ldr r1, [sp]
+; CHECK-NEXT:    movs r2, #1
+; CHECK-NEXT:    lsls r2, r1
+; CHECK-NEXT:    subs r1, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r2, #0
+; CHECK-NEXT:    subs r1, r2, #1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr64_32_a0:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    rsb r3, r2, #32
+; V7A-NEXT:    lsr r0, r0, r2
+; V7A-NEXT:    ldr r12, [sp]
+; V7A-NEXT:    subs r2, r2, #32
+; V7A-NEXT:    orr r0, r0, r1, lsl r3
+; V7A-NEXT:    lsrpl r0, r1, r2
+; V7A-NEXT:    mov r1, #1
+; V7A-NEXT:    lsl r1, r1, r12
+; V7A-NEXT:    subs r2, r12, #32
+; V7A-NEXT:    movwpl r1, #0
+; V7A-NEXT:    sub r1, r1, #1
+; V7A-NEXT:    and r0, r1, r0
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr64_32_a0:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    rsb.w r3, r2, #32
+; V7A-T-NEXT:    lsrs r0, r2
+; V7A-T-NEXT:    ldr.w r12, [sp]
+; V7A-T-NEXT:    subs r2, #32
+; V7A-T-NEXT:    lsl.w r3, r1, r3
+; V7A-T-NEXT:    orr.w r0, r0, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r1, r2
+; V7A-T-NEXT:    movs r1, #1
+; V7A-T-NEXT:    lsl.w r1, r1, r12
+; V7A-T-NEXT:    subs.w r2, r12, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r1, #0
+; V7A-T-NEXT:    subs r1, #1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr64_32_a0:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, lr}
+; V6M-NEXT:    push {r4, lr}
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    mov r4, r0
+; V6M-NEXT:    movs r0, #1
+; V6M-NEXT:    movs r1, #0
+; V6M-NEXT:    ldr r2, [sp, #8]
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    subs r0, r0, #1
+; V6M-NEXT:    ands r0, r4
+; V6M-NEXT:    pop {r4, pc}
+  %shifted = lshr i64 %val, %numskipbits
+  %onebit = shl i64 1, %numlowbits
+  %mask = add nsw i64 %onebit, -1
+  %masked = and i64 %mask, %shifted
+  %res = trunc i64 %masked to i32
+  ret i32 %res
+}
+
+; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
+define i32 @bextr64_32_a1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_32_a1:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    rsb.w r3, r2, #32
+; CHECK-NEXT:    lsrs r0, r2
+; CHECK-NEXT:    subs r2, #32
+; CHECK-NEXT:    lsl.w r3, r1, r3
+; CHECK-NEXT:    orr.w r0, r0, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r1, r2
+; CHECK-NEXT:    ldr r1, [sp]
+; CHECK-NEXT:    movs r2, #1
+; CHECK-NEXT:    lsl.w r1, r2, r1
+; CHECK-NEXT:    subs r1, #1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr64_32_a1:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r11, lr}
+; V7A-NEXT:    push {r11, lr}
+; V7A-NEXT:    ldr r12, [sp, #8]
+; V7A-NEXT:    mov lr, #1
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    lsr r0, r0, r2
+; V7A-NEXT:    add r12, r3, lr, lsl r12
+; V7A-NEXT:    rsb r3, r2, #32
+; V7A-NEXT:    subs r2, r2, #32
+; V7A-NEXT:    orr r0, r0, r1, lsl r3
+; V7A-NEXT:    lsrpl r0, r1, r2
+; V7A-NEXT:    and r0, r12, r0
+; V7A-NEXT:    pop {r11, pc}
+;
+; V7A-T-LABEL: bextr64_32_a1:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    rsb.w r3, r2, #32
+; V7A-T-NEXT:    lsrs r0, r2
+; V7A-T-NEXT:    ldr.w r12, [sp]
+; V7A-T-NEXT:    subs r2, #32
+; V7A-T-NEXT:    lsl.w r3, r1, r3
+; V7A-T-NEXT:    orr.w r0, r0, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r1, r2
+; V7A-T-NEXT:    movs r1, #1
+; V7A-T-NEXT:    lsl.w r1, r1, r12
+; V7A-T-NEXT:    subs r1, #1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr64_32_a1:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r7, lr}
+; V6M-NEXT:    push {r7, lr}
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    ldr r1, [sp, #8]
+; V6M-NEXT:    movs r2, #1
+; V6M-NEXT:    lsls r2, r1
+; V6M-NEXT:    subs r1, r2, #1
+; V6M-NEXT:    ands r0, r1
+; V6M-NEXT:    pop {r7, pc}
+  %shifted = lshr i64 %val, %numskipbits
+  %truncshifted = trunc i64 %shifted to i32
+  %onebit = shl i32 1, %numlowbits
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %mask, %truncshifted
+  ret i32 %masked
+}
+
+; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit.
+; Masking is 64-bit. Then truncation.
+define i32 @bextr64_32_a2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_32_a2:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    rsb.w r3, r2, #32
+; CHECK-NEXT:    lsrs r0, r2
+; CHECK-NEXT:    subs r2, #32
+; CHECK-NEXT:    lsl.w r3, r1, r3
+; CHECK-NEXT:    orr.w r0, r0, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r1, r2
+; CHECK-NEXT:    ldr r1, [sp]
+; CHECK-NEXT:    movs r2, #1
+; CHECK-NEXT:    lsl.w r1, r2, r1
+; CHECK-NEXT:    subs r1, #1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr64_32_a2:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r11, lr}
+; V7A-NEXT:    push {r11, lr}
+; V7A-NEXT:    ldr r12, [sp, #8]
+; V7A-NEXT:    mov lr, #1
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    lsr r0, r0, r2
+; V7A-NEXT:    add r12, r3, lr, lsl r12
+; V7A-NEXT:    rsb r3, r2, #32
+; V7A-NEXT:    subs r2, r2, #32
+; V7A-NEXT:    orr r0, r0, r1, lsl r3
+; V7A-NEXT:    lsrpl r0, r1, r2
+; V7A-NEXT:    and r0, r12, r0
+; V7A-NEXT:    pop {r11, pc}
+;
+; V7A-T-LABEL: bextr64_32_a2:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    rsb.w r3, r2, #32
+; V7A-T-NEXT:    lsrs r0, r2
+; V7A-T-NEXT:    ldr.w r12, [sp]
+; V7A-T-NEXT:    subs r2, #32
+; V7A-T-NEXT:    lsl.w r3, r1, r3
+; V7A-T-NEXT:    orr.w r0, r0, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r1, r2
+; V7A-T-NEXT:    movs r1, #1
+; V7A-T-NEXT:    lsl.w r1, r1, r12
+; V7A-T-NEXT:    subs r1, #1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr64_32_a2:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r7, lr}
+; V6M-NEXT:    push {r7, lr}
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    ldr r1, [sp, #8]
+; V6M-NEXT:    movs r2, #1
+; V6M-NEXT:    lsls r2, r1
+; V6M-NEXT:    subs r1, r2, #1
+; V6M-NEXT:    ands r0, r1
+; V6M-NEXT:    pop {r7, pc}
+  %shifted = lshr i64 %val, %numskipbits
+  %onebit = shl i32 1, %numlowbits
+  %mask = add nsw i32 %onebit, -1
+  %zextmask = zext i32 %mask to i64
+  %masked = and i64 %zextmask, %shifted
+  %truncmasked = trunc i64 %masked to i32
+  ret i32 %truncmasked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Pattern b. 32-bit
+; ---------------------------------------------------------------------------- ;
+
+define i32 @bextr32_b0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bextr32_b0:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov.w r3, #-1
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    lsl.w r2, r3, r2
+; CHECK-NEXT:    bics r0, r2
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr32_b0:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    mvn r1, #0
+; V7A-NEXT:    bic r0, r0, r1, lsl r2
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr32_b0:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    mov.w r3, #-1
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    lsl.w r2, r3, r2
+; V7A-T-NEXT:    bics r0, r2
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr32_b0:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    movs r1, #0
+; V6M-NEXT:    mvns r1, r1
+; V6M-NEXT:    lsls r1, r2
+; V6M-NEXT:    bics r0, r1
+; V6M-NEXT:    bx lr
+  %shifted = lshr i32 %val, %numskipbits
+  %notmask = shl i32 -1, %numlowbits
+  %mask = xor i32 %notmask, -1
+  %masked = and i32 %mask, %shifted
+  ret i32 %masked
+}
+
+define i32 @bextr32_b1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+; CHECK-LABEL: bextr32_b1_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov.w r3, #-1
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    lsl.w r2, r3, r2
+; CHECK-NEXT:    bics r0, r2
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr32_b1_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    mvn r1, #0
+; V7A-NEXT:    bic r0, r0, r1, lsl r2
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr32_b1_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    mov.w r3, #-1
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    lsl.w r2, r3, r2
+; V7A-T-NEXT:    bics r0, r2
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr32_b1_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    movs r1, #0
+; V6M-NEXT:    mvns r1, r1
+; V6M-NEXT:    lsls r1, r2
+; V6M-NEXT:    bics r0, r1
+; V6M-NEXT:    bx lr
+  %skip = zext i8 %numskipbits to i32
+  %shifted = lshr i32 %val, %skip
+  %conv = zext i8 %numlowbits to i32
+  %notmask = shl i32 -1, %conv
+  %mask = xor i32 %notmask, -1
+  %masked = and i32 %mask, %shifted
+  ret i32 %masked
+}
+
+define i32 @bextr32_b2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bextr32_b2_load:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    mov.w r3, #-1
+; CHECK-NEXT:    lsl.w r2, r3, r2
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    bics r0, r2
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr32_b2_load:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ldr r0, [r0]
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    mvn r1, #0
+; V7A-NEXT:    bic r0, r0, r1, lsl r2
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr32_b2_load:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    ldr r0, [r0]
+; V7A-T-NEXT:    mov.w r3, #-1
+; V7A-T-NEXT:    lsl.w r2, r3, r2
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    bics r0, r2
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr32_b2_load:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r3, #0
+; V6M-NEXT:    mvns r3, r3
+; V6M-NEXT:    lsls r3, r2
+; V6M-NEXT:    ldr r0, [r0]
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    bics r0, r3
+; V6M-NEXT:    bx lr
+  %val = load i32, ptr %w
+  %shifted = lshr i32 %val, %numskipbits
+  %notmask = shl i32 -1, %numlowbits
+  %mask = xor i32 %notmask, -1
+  %masked = and i32 %mask, %shifted
+  ret i32 %masked
+}
+
+define i32 @bextr32_b3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+; CHECK-LABEL: bextr32_b3_load_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    mov.w r3, #-1
+; CHECK-NEXT:    lsl.w r2, r3, r2
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    bics r0, r2
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr32_b3_load_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ldr r0, [r0]
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    mvn r1, #0
+; V7A-NEXT:    bic r0, r0, r1, lsl r2
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr32_b3_load_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    ldr r0, [r0]
+; V7A-T-NEXT:    mov.w r3, #-1
+; V7A-T-NEXT:    lsl.w r2, r3, r2
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    bics r0, r2
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr32_b3_load_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r3, #0
+; V6M-NEXT:    mvns r3, r3
+; V6M-NEXT:    lsls r3, r2
+; V6M-NEXT:    ldr r0, [r0]
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    bics r0, r3
+; V6M-NEXT:    bx lr
+  %val = load i32, ptr %w
+  %skip = zext i8 %numskipbits to i32
+  %shifted = lshr i32 %val, %skip
+  %conv = zext i8 %numlowbits to i32
+  %notmask = shl i32 -1, %conv
+  %mask = xor i32 %notmask, -1
+  %masked = and i32 %mask, %shifted
+  ret i32 %masked
+}
+
+define i32 @bextr32_b4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bextr32_b4_commutative:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov.w r3, #-1
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    lsl.w r2, r3, r2
+; CHECK-NEXT:    bics r0, r2
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr32_b4_commutative:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    mvn r1, #0
+; V7A-NEXT:    bic r0, r0, r1, lsl r2
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr32_b4_commutative:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    mov.w r3, #-1
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    lsl.w r2, r3, r2
+; V7A-T-NEXT:    bics r0, r2
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr32_b4_commutative:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    movs r1, #0
+; V6M-NEXT:    mvns r1, r1
+; V6M-NEXT:    lsls r1, r2
+; V6M-NEXT:    bics r0, r1
+; V6M-NEXT:    bx lr
+  %shifted = lshr i32 %val, %numskipbits
+  %notmask = shl i32 -1, %numlowbits
+  %mask = xor i32 %notmask, -1
+  %masked = and i32 %shifted, %mask ; swapped order
+  ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @bextr64_b0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_b0:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    rsb.w r3, r2, #32
+; CHECK-NEXT:    lsrs r0, r2
+; CHECK-NEXT:    ldr.w r12, [sp, #8]
+; CHECK-NEXT:    lsl.w r3, r1, r3
+; CHECK-NEXT:    orrs r0, r3
+; CHECK-NEXT:    subs.w r3, r2, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r1, r3
+; CHECK-NEXT:    lsr.w r1, r1, r2
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    lsl.w r3, r2, r12
+; CHECK-NEXT:    subs.w lr, r12, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r2, r2, lr
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r3, #0
+; CHECK-NEXT:    bics r1, r2
+; CHECK-NEXT:    bics r0, r3
+; CHECK-NEXT:    pop {r7, pc}
+;
+; V7A-LABEL: bextr64_b0:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r11, lr}
+; V7A-NEXT:    push {r11, lr}
+; V7A-NEXT:    rsb r3, r2, #32
+; V7A-NEXT:    lsr r0, r0, r2
+; V7A-NEXT:    ldr r12, [sp, #8]
+; V7A-NEXT:    orr r0, r0, r1, lsl r3
+; V7A-NEXT:    subs r3, r2, #32
+; V7A-NEXT:    lsrpl r0, r1, r3
+; V7A-NEXT:    lsr r1, r1, r2
+; V7A-NEXT:    movwpl r1, #0
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    subs lr, r12, #32
+; V7A-NEXT:    lsl r2, r3, r12
+; V7A-NEXT:    movwpl r2, #0
+; V7A-NEXT:    bic r0, r0, r2
+; V7A-NEXT:    lslpl r3, r3, lr
+; V7A-NEXT:    bic r1, r1, r3
+; V7A-NEXT:    pop {r11, pc}
+;
+; V7A-T-LABEL: bextr64_b0:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r4, r5, r7, lr}
+; V7A-T-NEXT:    push {r4, r5, r7, lr}
+; V7A-T-NEXT:    rsb.w r3, r2, #32
+; V7A-T-NEXT:    ldr.w r12, [sp, #16]
+; V7A-T-NEXT:    lsrs r0, r2
+; V7A-T-NEXT:    lsl.w r3, r1, r3
+; V7A-T-NEXT:    orr.w r5, r0, r3
+; V7A-T-NEXT:    mov.w r3, #-1
+; V7A-T-NEXT:    subs.w lr, r12, #32
+; V7A-T-NEXT:    lsl.w r0, r3, r12
+; V7A-T-NEXT:    itt pl
+; V7A-T-NEXT:    lslpl.w r3, r3, lr
+; V7A-T-NEXT:    movpl r0, #0
+; V7A-T-NEXT:    subs.w r4, r2, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r5, r1, r4
+; V7A-T-NEXT:    lsr.w r1, r1, r2
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r1, #0
+; V7A-T-NEXT:    bic.w r0, r5, r0
+; V7A-T-NEXT:    bics r1, r3
+; V7A-T-NEXT:    pop {r4, r5, r7, pc}
+;
+; V6M-LABEL: bextr64_b0:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r7, lr}
+; V6M-NEXT:    push {r4, r5, r7, lr}
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    mov r4, r0
+; V6M-NEXT:    mov r5, r1
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    ldr r2, [sp, #16]
+; V6M-NEXT:    mov r1, r0
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    bics r4, r0
+; V6M-NEXT:    bics r5, r1
+; V6M-NEXT:    mov r0, r4
+; V6M-NEXT:    mov r1, r5
+; V6M-NEXT:    pop {r4, r5, r7, pc}
+  %shifted = lshr i64 %val, %numskipbits
+  %notmask = shl i64 -1, %numlowbits
+  %mask = xor i64 %notmask, -1
+  %masked = and i64 %mask, %shifted
+  ret i64 %masked
+}
+
+define i64 @bextr64_b1_indexzext(i64 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_b1_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    lsr.w r12, r0, r2
+; CHECK-NEXT:    rsb.w r0, r2, #32
+; CHECK-NEXT:    lsl.w r0, r1, r0
+; CHECK-NEXT:    orr.w r12, r12, r0
+; CHECK-NEXT:    subs.w r0, r2, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r12, r1, r0
+; CHECK-NEXT:    lsr.w r0, r1, r2
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r0, #0
+; CHECK-NEXT:    subs.w r1, r3, #32
+; CHECK-NEXT:    lsl.w r3, r2, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl r2, r1
+; CHECK-NEXT:    bic.w r1, r0, r2
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r3, #0
+; CHECK-NEXT:    bic.w r0, r12, r3
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr64_b1_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    lsr r12, r0, r2
+; V7A-NEXT:    rsb r0, r2, #32
+; V7A-NEXT:    orr r12, r12, r1, lsl r0
+; V7A-NEXT:    subs r0, r2, #32
+; V7A-NEXT:    lsrpl r12, r1, r0
+; V7A-NEXT:    lsr r0, r1, r2
+; V7A-NEXT:    movwpl r0, #0
+; V7A-NEXT:    subs r1, r3, #32
+; V7A-NEXT:    mvn r2, #0
+; V7A-NEXT:    lsl r3, r2, r3
+; V7A-NEXT:    lslpl r2, r2, r1
+; V7A-NEXT:    bic r1, r0, r2
+; V7A-NEXT:    movwpl r3, #0
+; V7A-NEXT:    bic r0, r12, r3
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr64_b1_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    lsr.w r12, r0, r2
+; V7A-T-NEXT:    rsb.w r0, r2, #32
+; V7A-T-NEXT:    lsl.w r0, r1, r0
+; V7A-T-NEXT:    orr.w r12, r12, r0
+; V7A-T-NEXT:    subs.w r0, r2, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r12, r1, r0
+; V7A-T-NEXT:    lsr.w r0, r1, r2
+; V7A-T-NEXT:    mov.w r2, #-1
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r0, #0
+; V7A-T-NEXT:    subs.w r1, r3, #32
+; V7A-T-NEXT:    lsl.w r3, r2, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl r2, r1
+; V7A-T-NEXT:    bic.w r1, r0, r2
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r3, #0
+; V7A-T-NEXT:    bic.w r0, r12, r3
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr64_b1_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r6, lr}
+; V6M-NEXT:    push {r4, r5, r6, lr}
+; V6M-NEXT:    mov r4, r3
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    mov r5, r0
+; V6M-NEXT:    mov r6, r1
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    mov r1, r0
+; V6M-NEXT:    mov r2, r4
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    bics r5, r0
+; V6M-NEXT:    bics r6, r1
+; V6M-NEXT:    mov r0, r5
+; V6M-NEXT:    mov r1, r6
+; V6M-NEXT:    pop {r4, r5, r6, pc}
+  %skip = zext i8 %numskipbits to i64
+  %shifted = lshr i64 %val, %skip
+  %conv = zext i8 %numlowbits to i64
+  %notmask = shl i64 -1, %conv
+  %mask = xor i64 %notmask, -1
+  %masked = and i64 %mask, %shifted
+  ret i64 %masked
+}
+
+define i64 @bextr64_b2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_b2_load:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    ldrd r0, r3, [r0]
+; CHECK-NEXT:    rsb.w r1, r2, #32
+; CHECK-NEXT:    ldr.w r12, [sp, #8]
+; CHECK-NEXT:    lsl.w r1, r3, r1
+; CHECK-NEXT:    lsrs r0, r2
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    subs.w r1, r2, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r3, r1
+; CHECK-NEXT:    lsr.w r1, r3, r2
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    lsl.w r3, r2, r12
+; CHECK-NEXT:    subs.w lr, r12, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r2, r2, lr
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r3, #0
+; CHECK-NEXT:    bics r1, r2
+; CHECK-NEXT:    bics r0, r3
+; CHECK-NEXT:    pop {r7, pc}
+;
+; V7A-LABEL: bextr64_b2_load:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r11, lr}
+; V7A-NEXT:    push {r11, lr}
+; V7A-NEXT:    ldrd r0, r1, [r0]
+; V7A-NEXT:    rsb r3, r2, #32
+; V7A-NEXT:    ldr r12, [sp, #8]
+; V7A-NEXT:    lsr r0, r0, r2
+; V7A-NEXT:    orr r0, r0, r1, lsl r3
+; V7A-NEXT:    subs r3, r2, #32
+; V7A-NEXT:    lsrpl r0, r1, r3
+; V7A-NEXT:    lsr r1, r1, r2
+; V7A-NEXT:    movwpl r1, #0
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    subs lr, r12, #32
+; V7A-NEXT:    lsl r2, r3, r12
+; V7A-NEXT:    movwpl r2, #0
+; V7A-NEXT:    bic r0, r0, r2
+; V7A-NEXT:    lslpl r3, r3, lr
+; V7A-NEXT:    bic r1, r1, r3
+; V7A-NEXT:    pop {r11, pc}
+;
+; V7A-T-LABEL: bextr64_b2_load:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r7, lr}
+; V7A-T-NEXT:    push {r7, lr}
+; V7A-T-NEXT:    ldrd r0, r3, [r0]
+; V7A-T-NEXT:    rsb.w r1, r2, #32
+; V7A-T-NEXT:    ldr.w r12, [sp, #8]
+; V7A-T-NEXT:    lsl.w r1, r3, r1
+; V7A-T-NEXT:    lsrs r0, r2
+; V7A-T-NEXT:    orrs r0, r1
+; V7A-T-NEXT:    subs.w r1, r2, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r3, r1
+; V7A-T-NEXT:    lsr.w r1, r3, r2
+; V7A-T-NEXT:    mov.w r3, #-1
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r1, #0
+; V7A-T-NEXT:    lsl.w r2, r3, r12
+; V7A-T-NEXT:    subs.w lr, r12, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl.w r3, r3, lr
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r2, #0
+; V7A-T-NEXT:    bics r1, r3
+; V7A-T-NEXT:    bics r0, r2
+; V7A-T-NEXT:    pop {r7, pc}
+;
+; V6M-LABEL: bextr64_b2_load:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r7, lr}
+; V6M-NEXT:    push {r4, r5, r7, lr}
+; V6M-NEXT:    ldr r3, [r0]
+; V6M-NEXT:    ldr r1, [r0, #4]
+; V6M-NEXT:    mov r0, r3
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    mov r4, r0
+; V6M-NEXT:    mov r5, r1
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    ldr r2, [sp, #16]
+; V6M-NEXT:    mov r1, r0
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    bics r4, r0
+; V6M-NEXT:    bics r5, r1
+; V6M-NEXT:    mov r0, r4
+; V6M-NEXT:    mov r1, r5
+; V6M-NEXT:    pop {r4, r5, r7, pc}
+  %val = load i64, ptr %w
+  %shifted = lshr i64 %val, %numskipbits
+  %notmask = shl i64 -1, %numlowbits
+  %mask = xor i64 %notmask, -1
+  %masked = and i64 %mask, %shifted
+  ret i64 %masked
+}
+
+define i64 @bextr64_b3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_b3_load_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    ldrd r12, r0, [r0]
+; CHECK-NEXT:    rsb.w r3, r1, #32
+; CHECK-NEXT:    lsl.w lr, r0, r3
+; CHECK-NEXT:    lsr.w r3, r12, r1
+; CHECK-NEXT:    orr.w r12, r3, lr
+; CHECK-NEXT:    subs.w r3, r1, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r12, r0, r3
+; CHECK-NEXT:    lsr.w r0, r0, r1
+; CHECK-NEXT:    mov.w r3, #-1
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r0, #0
+; CHECK-NEXT:    subs.w r1, r2, #32
+; CHECK-NEXT:    lsl.w r2, r3, r2
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl r3, r1
+; CHECK-NEXT:    bic.w r1, r0, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r2, #0
+; CHECK-NEXT:    bic.w r0, r12, r2
+; CHECK-NEXT:    pop {r7, pc}
+;
+; V7A-LABEL: bextr64_b3_load_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ldm r0, {r0, r3}
+; V7A-NEXT:    lsr r12, r0, r1
+; V7A-NEXT:    rsb r0, r1, #32
+; V7A-NEXT:    orr r12, r12, r3, lsl r0
+; V7A-NEXT:    subs r0, r1, #32
+; V7A-NEXT:    lsrpl r12, r3, r0
+; V7A-NEXT:    lsr r0, r3, r1
+; V7A-NEXT:    movwpl r0, #0
+; V7A-NEXT:    subs r1, r2, #32
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    lsl r2, r3, r2
+; V7A-NEXT:    lslpl r3, r3, r1
+; V7A-NEXT:    bic r1, r0, r3
+; V7A-NEXT:    movwpl r2, #0
+; V7A-NEXT:    bic r0, r12, r2
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr64_b3_load_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r7, lr}
+; V7A-T-NEXT:    push {r7, lr}
+; V7A-T-NEXT:    ldrd r12, r3, [r0]
+; V7A-T-NEXT:    rsb.w r0, r1, #32
+; V7A-T-NEXT:    lsl.w lr, r3, r0
+; V7A-T-NEXT:    lsr.w r0, r12, r1
+; V7A-T-NEXT:    orr.w r12, r0, lr
+; V7A-T-NEXT:    subs.w r0, r1, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r12, r3, r0
+; V7A-T-NEXT:    lsr.w r0, r3, r1
+; V7A-T-NEXT:    mov.w r3, #-1
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r0, #0
+; V7A-T-NEXT:    subs.w r1, r2, #32
+; V7A-T-NEXT:    lsl.w r2, r3, r2
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl r3, r1
+; V7A-T-NEXT:    bic.w r1, r0, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r2, #0
+; V7A-T-NEXT:    bic.w r0, r12, r2
+; V7A-T-NEXT:    pop {r7, pc}
+;
+; V6M-LABEL: bextr64_b3_load_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r6, lr}
+; V6M-NEXT:    push {r4, r5, r6, lr}
+; V6M-NEXT:    mov r4, r2
+; V6M-NEXT:    mov r2, r1
+; V6M-NEXT:    ldr r3, [r0]
+; V6M-NEXT:    ldr r1, [r0, #4]
+; V6M-NEXT:    mov r0, r3
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    mov r5, r0
+; V6M-NEXT:    mov r6, r1
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    mov r1, r0
+; V6M-NEXT:    mov r2, r4
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    bics r5, r0
+; V6M-NEXT:    bics r6, r1
+; V6M-NEXT:    mov r0, r5
+; V6M-NEXT:    mov r1, r6
+; V6M-NEXT:    pop {r4, r5, r6, pc}
+  %val = load i64, ptr %w
+  %skip = zext i8 %numskipbits to i64
+  %shifted = lshr i64 %val, %skip
+  %conv = zext i8 %numlowbits to i64
+  %notmask = shl i64 -1, %conv
+  %mask = xor i64 %notmask, -1
+  %masked = and i64 %mask, %shifted
+  ret i64 %masked
+}
+
+define i64 @bextr64_b4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_b4_commutative:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    rsb.w r3, r2, #32
+; CHECK-NEXT:    lsrs r0, r2
+; CHECK-NEXT:    ldr.w r12, [sp, #8]
+; CHECK-NEXT:    lsl.w r3, r1, r3
+; CHECK-NEXT:    orrs r0, r3
+; CHECK-NEXT:    subs.w r3, r2, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r1, r3
+; CHECK-NEXT:    lsr.w r1, r1, r2
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    lsl.w r3, r2, r12
+; CHECK-NEXT:    subs.w lr, r12, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r2, r2, lr
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r3, #0
+; CHECK-NEXT:    bics r1, r2
+; CHECK-NEXT:    bics r0, r3
+; CHECK-NEXT:    pop {r7, pc}
+;
+; V7A-LABEL: bextr64_b4_commutative:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r11, lr}
+; V7A-NEXT:    push {r11, lr}
+; V7A-NEXT:    rsb r3, r2, #32
+; V7A-NEXT:    lsr r0, r0, r2
+; V7A-NEXT:    ldr r12, [sp, #8]
+; V7A-NEXT:    orr r0, r0, r1, lsl r3
+; V7A-NEXT:    subs r3, r2, #32
+; V7A-NEXT:    lsrpl r0, r1, r3
+; V7A-NEXT:    lsr r1, r1, r2
+; V7A-NEXT:    movwpl r1, #0
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    subs lr, r12, #32
+; V7A-NEXT:    lsl r2, r3, r12
+; V7A-NEXT:    movwpl r2, #0
+; V7A-NEXT:    bic r0, r0, r2
+; V7A-NEXT:    lslpl r3, r3, lr
+; V7A-NEXT:    bic r1, r1, r3
+; V7A-NEXT:    pop {r11, pc}
+;
+; V7A-T-LABEL: bextr64_b4_commutative:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r4, r5, r7, lr}
+; V7A-T-NEXT:    push {r4, r5, r7, lr}
+; V7A-T-NEXT:    rsb.w r3, r2, #32
+; V7A-T-NEXT:    ldr.w r12, [sp, #16]
+; V7A-T-NEXT:    lsrs r0, r2
+; V7A-T-NEXT:    lsl.w r3, r1, r3
+; V7A-T-NEXT:    orr.w r5, r0, r3
+; V7A-T-NEXT:    mov.w r3, #-1
+; V7A-T-NEXT:    subs.w lr, r12, #32
+; V7A-T-NEXT:    lsl.w r0, r3, r12
+; V7A-T-NEXT:    itt pl
+; V7A-T-NEXT:    lslpl.w r3, r3, lr
+; V7A-T-NEXT:    movpl r0, #0
+; V7A-T-NEXT:    subs.w r4, r2, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r5, r1, r4
+; V7A-T-NEXT:    lsr.w r1, r1, r2
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r1, #0
+; V7A-T-NEXT:    bic.w r0, r5, r0
+; V7A-T-NEXT:    bics r1, r3
+; V7A-T-NEXT:    pop {r4, r5, r7, pc}
+;
+; V6M-LABEL: bextr64_b4_commutative:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r7, lr}
+; V6M-NEXT:    push {r4, r5, r7, lr}
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    mov r4, r0
+; V6M-NEXT:    mov r5, r1
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    ldr r2, [sp, #16]
+; V6M-NEXT:    mov r1, r0
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    bics r4, r0
+; V6M-NEXT:    bics r5, r1
+; V6M-NEXT:    mov r0, r4
+; V6M-NEXT:    mov r1, r5
+; V6M-NEXT:    pop {r4, r5, r7, pc}
+  %shifted = lshr i64 %val, %numskipbits
+  %notmask = shl i64 -1, %numlowbits
+  %mask = xor i64 %notmask, -1
+  %masked = and i64 %shifted, %mask ; swapped order
+  ret i64 %masked
+}
+
+; 64-bit, but with 32-bit output
+
+; Everything is done in 64-bit; truncation happens last.
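+; A rough C model of this variant (illustrative only; uint32_t/uint64_t come
+; from <stdint.h>, and numlowbits is assumed to be in [0, 63]):
+;   uint32_t bextr64_32_b0(uint64_t val, uint64_t skip, uint8_t low) {
+;     uint64_t mask = ~(~0ULL << low);         // low ones, built in 64-bit
+;     return (uint32_t)((val >> skip) & mask); // truncate as the last step
+;   }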
+define i32 @bextr64_32_b0(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_32_b0:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    rsb.w r3, r2, #32
+; CHECK-NEXT:    lsrs r0, r2
+; CHECK-NEXT:    subs r2, #32
+; CHECK-NEXT:    lsl.w r3, r1, r3
+; CHECK-NEXT:    orr.w r0, r0, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r1, r2
+; CHECK-NEXT:    ldrb.w r1, [sp]
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    lsls r2, r1
+; CHECK-NEXT:    subs r1, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r2, #0
+; CHECK-NEXT:    bics r0, r2
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr64_32_b0:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    rsb r3, r2, #32
+; V7A-NEXT:    lsr r0, r0, r2
+; V7A-NEXT:    ldrb r12, [sp]
+; V7A-NEXT:    subs r2, r2, #32
+; V7A-NEXT:    orr r0, r0, r1, lsl r3
+; V7A-NEXT:    lsrpl r0, r1, r2
+; V7A-NEXT:    mvn r1, #0
+; V7A-NEXT:    lsl r1, r1, r12
+; V7A-NEXT:    subs r2, r12, #32
+; V7A-NEXT:    movwpl r1, #0
+; V7A-NEXT:    bic r0, r0, r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr64_32_b0:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    lsr.w r12, r0, r2
+; V7A-T-NEXT:    rsb.w r0, r2, #32
+; V7A-T-NEXT:    ldrb.w r3, [sp]
+; V7A-T-NEXT:    subs r2, #32
+; V7A-T-NEXT:    lsl.w r0, r1, r0
+; V7A-T-NEXT:    orr.w r0, r0, r12
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r1, r2
+; V7A-T-NEXT:    mov.w r1, #-1
+; V7A-T-NEXT:    lsls r1, r3
+; V7A-T-NEXT:    subs.w r2, r3, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r1, #0
+; V7A-T-NEXT:    bics r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr64_32_b0:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, lr}
+; V6M-NEXT:    push {r4, lr}
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    mov r4, r0
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    add r1, sp, #8
+; V6M-NEXT:    ldrb r2, [r1]
+; V6M-NEXT:    mov r1, r0
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    bics r4, r0
+; V6M-NEXT:    mov r0, r4
+; V6M-NEXT:    pop {r4, pc}
+  %shiftedval = lshr i64 %val, %numskipbits
+  %widenumlowbits = zext i8 %numlowbits to i64
+  %notmask = shl nsw i64 -1, %widenumlowbits
+  %mask = xor i64 %notmask, -1
+  %wideres = and i64 %shiftedval, %mask
+  %res = trunc i64 %wideres to i32
+  ret i32 %res
+}
+
+; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
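+; A rough C model (illustrative; assumes numlowbits in [0, 31]):
+;   uint32_t bextr64_32_b1(uint64_t val, uint64_t skip, uint8_t low) {
+;     uint32_t mask = ~(~0u << low);         // mask built in 32-bit
+;     return (uint32_t)(val >> skip) & mask; // truncate first, then mask
+;   }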
+define i32 @bextr64_32_b1(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_32_b1:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    rsb.w r3, r2, #32
+; CHECK-NEXT:    lsrs r0, r2
+; CHECK-NEXT:    subs r2, #32
+; CHECK-NEXT:    lsl.w r3, r1, r3
+; CHECK-NEXT:    orr.w r0, r0, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r1, r2
+; CHECK-NEXT:    ldrb.w r1, [sp]
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    lsl.w r1, r2, r1
+; CHECK-NEXT:    bics r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr64_32_b1:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    rsb r3, r2, #32
+; V7A-NEXT:    lsr r0, r0, r2
+; V7A-NEXT:    ldrb r12, [sp]
+; V7A-NEXT:    subs r2, r2, #32
+; V7A-NEXT:    orr r0, r0, r1, lsl r3
+; V7A-NEXT:    lsrpl r0, r1, r2
+; V7A-NEXT:    mvn r1, #0
+; V7A-NEXT:    bic r0, r0, r1, lsl r12
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr64_32_b1:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    rsb.w r3, r2, #32
+; V7A-T-NEXT:    lsrs r0, r2
+; V7A-T-NEXT:    ldrb.w r12, [sp]
+; V7A-T-NEXT:    subs r2, #32
+; V7A-T-NEXT:    lsl.w r3, r1, r3
+; V7A-T-NEXT:    orr.w r0, r0, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r1, r2
+; V7A-T-NEXT:    mov.w r1, #-1
+; V7A-T-NEXT:    lsl.w r1, r1, r12
+; V7A-T-NEXT:    bics r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr64_32_b1:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r7, lr}
+; V6M-NEXT:    push {r7, lr}
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    add r1, sp, #8
+; V6M-NEXT:    ldrb r1, [r1]
+; V6M-NEXT:    movs r2, #0
+; V6M-NEXT:    mvns r2, r2
+; V6M-NEXT:    lsls r2, r1
+; V6M-NEXT:    bics r0, r2
+; V6M-NEXT:    pop {r7, pc}
+  %shiftedval = lshr i64 %val, %numskipbits
+  %truncshiftedval = trunc i64 %shiftedval to i32
+  %widenumlowbits = zext i8 %numlowbits to i32
+  %notmask = shl nsw i32 -1, %widenumlowbits
+  %mask = xor i32 %notmask, -1
+  %res = and i32 %truncshiftedval, %mask
+  ret i32 %res
+}
+
+; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit.
+; Masking is 64-bit. Then truncation.
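+; A rough C model (illustrative; assumes numlowbits in [0, 31]):
+;   uint32_t bextr64_32_b2(uint64_t val, uint64_t skip, uint8_t low) {
+;     uint64_t mask = (uint64_t)~(~0u << low); // 32-bit mask, zext to 64-bit
+;     return (uint32_t)((val >> skip) & mask); // 64-bit AND, then truncate
+;   }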
+define i32 @bextr64_32_b2(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_32_b2:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    rsb.w r3, r2, #32
+; CHECK-NEXT:    lsrs r0, r2
+; CHECK-NEXT:    subs r2, #32
+; CHECK-NEXT:    lsl.w r3, r1, r3
+; CHECK-NEXT:    orr.w r0, r0, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r1, r2
+; CHECK-NEXT:    ldrb.w r1, [sp]
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    lsl.w r1, r2, r1
+; CHECK-NEXT:    bics r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr64_32_b2:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    rsb r3, r2, #32
+; V7A-NEXT:    lsr r0, r0, r2
+; V7A-NEXT:    ldrb r12, [sp]
+; V7A-NEXT:    subs r2, r2, #32
+; V7A-NEXT:    orr r0, r0, r1, lsl r3
+; V7A-NEXT:    lsrpl r0, r1, r2
+; V7A-NEXT:    mvn r1, #0
+; V7A-NEXT:    bic r0, r0, r1, lsl r12
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr64_32_b2:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    rsb.w r3, r2, #32
+; V7A-T-NEXT:    lsrs r0, r2
+; V7A-T-NEXT:    ldrb.w r12, [sp]
+; V7A-T-NEXT:    subs r2, #32
+; V7A-T-NEXT:    lsl.w r3, r1, r3
+; V7A-T-NEXT:    orr.w r0, r0, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r1, r2
+; V7A-T-NEXT:    mov.w r1, #-1
+; V7A-T-NEXT:    lsl.w r1, r1, r12
+; V7A-T-NEXT:    bics r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr64_32_b2:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r7, lr}
+; V6M-NEXT:    push {r7, lr}
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    add r1, sp, #8
+; V6M-NEXT:    ldrb r1, [r1]
+; V6M-NEXT:    movs r2, #0
+; V6M-NEXT:    mvns r2, r2
+; V6M-NEXT:    lsls r2, r1
+; V6M-NEXT:    bics r0, r2
+; V6M-NEXT:    pop {r7, pc}
+  %shiftedval = lshr i64 %val, %numskipbits
+  %widenumlowbits = zext i8 %numlowbits to i32
+  %notmask = shl nsw i32 -1, %widenumlowbits
+  %mask = xor i32 %notmask, -1
+  %zextmask = zext i32 %mask to i64
+  %wideres = and i64 %shiftedval, %zextmask
+  %res = trunc i64 %wideres to i32
+  ret i32 %res
+}
+
+; ---------------------------------------------------------------------------- ;
+; Pattern c. 32-bit.
+; ---------------------------------------------------------------------------- ;
+
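+; In pattern c the mask is all-ones shifted right by the number of high bits.
+; A minimal C sketch (illustrative only; uint32_t from <stdint.h>, numlowbits
+; assumed to be in [1, 32] so the shift amount stays below the bit width):
+;   uint32_t bextr32_c(uint32_t val, uint32_t skip, uint32_t low) {
+;     uint32_t mask = ~0u >> (32 - low); // keep only the low 'low' bits
+;     return mask & (val >> skip);
+;   }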
+define i32 @bextr32_c0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bextr32_c0:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    rsb.w r1, r2, #32
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    lsr.w r1, r2, r1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr32_c0:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    rsb r2, r2, #32
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    lsr r2, r3, r2
+; V7A-NEXT:    and r0, r2, r0, lsr r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr32_c0:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    rsb.w r1, r2, #32
+; V7A-T-NEXT:    mov.w r2, #-1
+; V7A-T-NEXT:    lsr.w r1, r2, r1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr32_c0:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    movs r1, #32
+; V6M-NEXT:    subs r1, r1, r2
+; V6M-NEXT:    movs r2, #0
+; V6M-NEXT:    mvns r2, r2
+; V6M-NEXT:    lsrs r2, r1
+; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    bx lr
+  %shifted = lshr i32 %val, %numskipbits
+  %numhighbits = sub i32 32, %numlowbits
+  %mask = lshr i32 -1, %numhighbits
+  %masked = and i32 %mask, %shifted
+  ret i32 %masked
+}
+
+define i32 @bextr32_c1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bextr32_c1_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    uxtb r1, r1
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    rsb.w r1, r2, #32
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    uxtb r1, r1
+; CHECK-NEXT:    lsr.w r1, r2, r1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr32_c1_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    rsb r2, r2, #32
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    uxtb r1, r1
+; V7A-NEXT:    uxtb r2, r2
+; V7A-NEXT:    lsr r2, r3, r2
+; V7A-NEXT:    and r0, r2, r0, lsr r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr32_c1_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    uxtb r1, r1
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    rsb.w r1, r2, #32
+; V7A-T-NEXT:    mov.w r2, #-1
+; V7A-T-NEXT:    uxtb r1, r1
+; V7A-T-NEXT:    lsr.w r1, r2, r1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr32_c1_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    uxtb r1, r1
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    movs r1, #32
+; V6M-NEXT:    subs r1, r1, r2
+; V6M-NEXT:    uxtb r1, r1
+; V6M-NEXT:    movs r2, #0
+; V6M-NEXT:    mvns r2, r2
+; V6M-NEXT:    lsrs r2, r1
+; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    bx lr
+  %skip = zext i8 %numskipbits to i32
+  %shifted = lshr i32 %val, %skip
+  %numhighbits = sub i8 32, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i32
+  %mask = lshr i32 -1, %sh_prom
+  %masked = and i32 %mask, %shifted
+  ret i32 %masked
+}
+
+define i32 @bextr32_c2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bextr32_c2_load:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    rsb.w r1, r2, #32
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    lsr.w r1, r2, r1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr32_c2_load:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ldr r0, [r0]
+; V7A-NEXT:    rsb r2, r2, #32
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    lsr r2, r3, r2
+; V7A-NEXT:    and r0, r2, r0, lsr r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr32_c2_load:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    ldr r0, [r0]
+; V7A-T-NEXT:    rsb.w r2, r2, #32
+; V7A-T-NEXT:    mov.w r3, #-1
+; V7A-T-NEXT:    lsr.w r2, r3, r2
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    ands r0, r2
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr32_c2_load:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    ldr r3, [r0]
+; V6M-NEXT:    lsrs r3, r1
+; V6M-NEXT:    movs r0, #32
+; V6M-NEXT:    subs r1, r0, r2
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    ands r0, r3
+; V6M-NEXT:    bx lr
+  %val = load i32, ptr %w
+  %shifted = lshr i32 %val, %numskipbits
+  %numhighbits = sub i32 32, %numlowbits
+  %mask = lshr i32 -1, %numhighbits
+  %masked = and i32 %mask, %shifted
+  ret i32 %masked
+}
+
+define i32 @bextr32_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bextr32_c3_load_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    uxtb r1, r1
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    rsb.w r1, r2, #32
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    uxtb r1, r1
+; CHECK-NEXT:    lsr.w r1, r2, r1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr32_c3_load_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    rsb r2, r2, #32
+; V7A-NEXT:    ldr r0, [r0]
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    uxtb r1, r1
+; V7A-NEXT:    uxtb r2, r2
+; V7A-NEXT:    lsr r2, r3, r2
+; V7A-NEXT:    and r0, r2, r0, lsr r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr32_c3_load_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    ldr r0, [r0]
+; V7A-T-NEXT:    uxtb r1, r1
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    rsb.w r1, r2, #32
+; V7A-T-NEXT:    mov.w r2, #-1
+; V7A-T-NEXT:    uxtb r1, r1
+; V7A-T-NEXT:    lsr.w r1, r2, r1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr32_c3_load_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    uxtb r1, r1
+; V6M-NEXT:    ldr r3, [r0]
+; V6M-NEXT:    lsrs r3, r1
+; V6M-NEXT:    movs r0, #32
+; V6M-NEXT:    subs r0, r0, r2
+; V6M-NEXT:    uxtb r1, r0
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    ands r0, r3
+; V6M-NEXT:    bx lr
+  %val = load i32, ptr %w
+  %skip = zext i8 %numskipbits to i32
+  %shifted = lshr i32 %val, %skip
+  %numhighbits = sub i8 32, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i32
+  %mask = lshr i32 -1, %sh_prom
+  %masked = and i32 %mask, %shifted
+  ret i32 %masked
+}
+
+define i32 @bextr32_c4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bextr32_c4_commutative:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    rsb.w r1, r2, #32
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    lsr.w r1, r2, r1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr32_c4_commutative:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    rsb r1, r2, #32
+; V7A-NEXT:    mvn r2, #0
+; V7A-NEXT:    and r0, r0, r2, lsr r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr32_c4_commutative:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    rsb.w r1, r2, #32
+; V7A-T-NEXT:    mov.w r2, #-1
+; V7A-T-NEXT:    lsr.w r1, r2, r1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr32_c4_commutative:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    movs r1, #32
+; V6M-NEXT:    subs r1, r1, r2
+; V6M-NEXT:    movs r2, #0
+; V6M-NEXT:    mvns r2, r2
+; V6M-NEXT:    lsrs r2, r1
+; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    bx lr
+  %shifted = lshr i32 %val, %numskipbits
+  %numhighbits = sub i32 32, %numlowbits
+  %mask = lshr i32 -1, %numhighbits
+  %masked = and i32 %shifted, %mask ; swapped order
+  ret i32 %masked
+}
+
+; 64-bit.
+
+define i64 @bextr64_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_c0:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    rsb.w r3, r2, #32
+; CHECK-NEXT:    lsrs r0, r2
+; CHECK-NEXT:    ldr.w r12, [sp]
+; CHECK-NEXT:    lsl.w r3, r1, r3
+; CHECK-NEXT:    orrs r0, r3
+; CHECK-NEXT:    subs.w r3, r2, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r1, r3
+; CHECK-NEXT:    rsb.w r3, r12, #64
+; CHECK-NEXT:    lsr.w r1, r1, r2
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    lsr.w r3, r2, r3
+; CHECK-NEXT:    rsbs.w r12, r12, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r3, #0
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r2, r2, r12
+; CHECK-NEXT:    ands r1, r3
+; CHECK-NEXT:    ands r0, r2
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr64_c0:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r4, r5, r11, lr}
+; V7A-NEXT:    push {r4, r5, r11, lr}
+; V7A-NEXT:    ldr r12, [sp, #16]
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    lsr r5, r1, r2
+; V7A-NEXT:    lsr r0, r0, r2
+; V7A-NEXT:    rsb r4, r12, #64
+; V7A-NEXT:    rsbs lr, r12, #32
+; V7A-NEXT:    lsr r4, r3, r4
+; V7A-NEXT:    lsrpl r3, r3, lr
+; V7A-NEXT:    movwpl r4, #0
+; V7A-NEXT:    subs lr, r2, #32
+; V7A-NEXT:    rsb r2, r2, #32
+; V7A-NEXT:    movwpl r5, #0
+; V7A-NEXT:    and r12, r4, r5
+; V7A-NEXT:    orr r0, r0, r1, lsl r2
+; V7A-NEXT:    lsrpl r0, r1, lr
+; V7A-NEXT:    mov r1, r12
+; V7A-NEXT:    and r0, r3, r0
+; V7A-NEXT:    pop {r4, r5, r11, pc}
+;
+; V7A-T-LABEL: bextr64_c0:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r7, lr}
+; V7A-T-NEXT:    push {r7, lr}
+; V7A-T-NEXT:    rsb.w r3, r2, #32
+; V7A-T-NEXT:    lsrs r0, r2
+; V7A-T-NEXT:    ldr.w r12, [sp, #8]
+; V7A-T-NEXT:    mov.w lr, #-1
+; V7A-T-NEXT:    lsl.w r3, r1, r3
+; V7A-T-NEXT:    orrs r0, r3
+; V7A-T-NEXT:    subs.w r3, r2, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r1, r3
+; V7A-T-NEXT:    lsr.w r1, r1, r2
+; V7A-T-NEXT:    mov.w r3, #-1
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r1, #0
+; V7A-T-NEXT:    rsbs.w r2, r12, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl r3, r2
+; V7A-T-NEXT:    rsb.w r2, r12, #64
+; V7A-T-NEXT:    and.w r0, r0, r3
+; V7A-T-NEXT:    lsr.w r2, lr, r2
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r2, #0
+; V7A-T-NEXT:    ands r1, r2
+; V7A-T-NEXT:    pop {r7, pc}
+;
+; V6M-LABEL: bextr64_c0:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r7, lr}
+; V6M-NEXT:    push {r4, r5, r7, lr}
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    mov r5, r0
+; V6M-NEXT:    mov r4, r1
+; V6M-NEXT:    ldr r0, [sp, #16]
+; V6M-NEXT:    movs r1, #64
+; V6M-NEXT:    subs r2, r1, r0
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    mov r1, r0
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    ands r0, r5
+; V6M-NEXT:    ands r1, r4
+; V6M-NEXT:    pop {r4, r5, r7, pc}
+  %shifted = lshr i64 %val, %numskipbits
+  %numhighbits = sub i64 64, %numlowbits
+  %mask = lshr i64 -1, %numhighbits
+  %masked = and i64 %mask, %shifted
+  ret i64 %masked
+}
+
+define i64 @bextr64_c1_indexzext(i64 %val, i8 %numskipbits, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_c1_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    uxtb r2, r2
+; CHECK-NEXT:    lsr.w r12, r0, r2
+; CHECK-NEXT:    rsb.w r0, r2, #32
+; CHECK-NEXT:    lsl.w r0, r1, r0
+; CHECK-NEXT:    orr.w r12, r12, r0
+; CHECK-NEXT:    subs.w r0, r2, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r12, r1, r0
+; CHECK-NEXT:    rsb.w r0, r3, #64
+; CHECK-NEXT:    lsr.w r1, r1, r2
+; CHECK-NEXT:    mov.w r3, #-1
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    uxtb r0, r0
+; CHECK-NEXT:    subs.w lr, r0, #32
+; CHECK-NEXT:    lsr.w r2, r3, r0
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r3, r3, lr
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r2, #0
+; CHECK-NEXT:    and.w r0, r3, r12
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    pop {r7, pc}
+;
+; V7A-LABEL: bextr64_c1_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r4, lr}
+; V7A-NEXT:    push {r4, lr}
+; V7A-NEXT:    uxtb r12, r2
+; V7A-NEXT:    lsr lr, r0, r12
+; V7A-NEXT:    rsb r0, r12, #32
+; V7A-NEXT:    orr r4, lr, r1, lsl r0
+; V7A-NEXT:    mvn lr, #31
+; V7A-NEXT:    uxtab r2, lr, r2
+; V7A-NEXT:    cmp r2, #0
+; V7A-NEXT:    lsrpl r4, r1, r2
+; V7A-NEXT:    rsb r2, r3, #64
+; V7A-NEXT:    lsr r1, r1, r12
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    uxtb r12, r2
+; V7A-NEXT:    uxtab r2, lr, r2
+; V7A-NEXT:    movwpl r1, #0
+; V7A-NEXT:    lsr r0, r3, r12
+; V7A-NEXT:    cmp r2, #0
+; V7A-NEXT:    movwpl r0, #0
+; V7A-NEXT:    and r1, r0, r1
+; V7A-NEXT:    lsrpl r3, r3, r2
+; V7A-NEXT:    and r0, r3, r4
+; V7A-NEXT:    pop {r4, pc}
+;
+; V7A-T-LABEL: bextr64_c1_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r4, lr}
+; V7A-T-NEXT:    push {r4, lr}
+; V7A-T-NEXT:    uxtb.w r12, r2
+; V7A-T-NEXT:    lsr.w lr, r0, r12
+; V7A-T-NEXT:    rsb.w r0, r12, #32
+; V7A-T-NEXT:    lsl.w r0, r1, r0
+; V7A-T-NEXT:    orr.w r4, lr, r0
+; V7A-T-NEXT:    mvn lr, #31
+; V7A-T-NEXT:    uxtab r2, lr, r2
+; V7A-T-NEXT:    cmp r2, #0
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r4, r1, r2
+; V7A-T-NEXT:    rsb.w r2, r3, #64
+; V7A-T-NEXT:    lsr.w r1, r1, r12
+; V7A-T-NEXT:    mov.w r3, #-1
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r1, #0
+; V7A-T-NEXT:    uxtb.w r12, r2
+; V7A-T-NEXT:    uxtab r2, lr, r2
+; V7A-T-NEXT:    lsr.w r0, r3, r12
+; V7A-T-NEXT:    cmp r2, #0
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r0, #0
+; V7A-T-NEXT:    and.w r1, r1, r0
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl r3, r2
+; V7A-T-NEXT:    and.w r0, r3, r4
+; V7A-T-NEXT:    pop {r4, pc}
+;
+; V6M-LABEL: bextr64_c1_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r6, lr}
+; V6M-NEXT:    push {r4, r5, r6, lr}
+; V6M-NEXT:    mov r5, r3
+; V6M-NEXT:    uxtb r2, r2
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    mov r6, r0
+; V6M-NEXT:    mov r4, r1
+; V6M-NEXT:    movs r0, #64
+; V6M-NEXT:    subs r0, r0, r5
+; V6M-NEXT:    uxtb r2, r0
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    mov r1, r0
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    ands r0, r6
+; V6M-NEXT:    ands r1, r4
+; V6M-NEXT:    pop {r4, r5, r6, pc}
+  %skip = zext i8 %numskipbits to i64
+  %shifted = lshr i64 %val, %skip
+  %numhighbits = sub i8 64, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i64
+  %mask = lshr i64 -1, %sh_prom
+  %masked = and i64 %mask, %shifted
+  ret i64 %masked
+}
+
+define i64 @bextr64_c2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_c2_load:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ldrd r0, r3, [r0]
+; CHECK-NEXT:    rsb.w r1, r2, #32
+; CHECK-NEXT:    ldr.w r12, [sp]
+; CHECK-NEXT:    lsl.w r1, r3, r1
+; CHECK-NEXT:    lsrs r0, r2
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    subs.w r1, r2, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r3, r1
+; CHECK-NEXT:    lsr.w r1, r3, r2
+; CHECK-NEXT:    rsb.w r3, r12, #64
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    rsbs.w r12, r12, #32
+; CHECK-NEXT:    lsr.w r3, r2, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r3, #0
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r2, r2, r12
+; CHECK-NEXT:    ands r1, r3
+; CHECK-NEXT:    ands r0, r2
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr64_c2_load:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r4, r6, r8, lr}
+; V7A-NEXT:    push {r4, r6, r8, lr}
+; V7A-NEXT:    ldr r12, [sp, #16]
+; V7A-NEXT:    ldr r3, [r0, #4]
+; V7A-NEXT:    rsb r6, r12, #64
+; V7A-NEXT:    ldr r8, [r0]
+; V7A-NEXT:    mvn r0, #0
+; V7A-NEXT:    rsbs r1, r12, #32
+; V7A-NEXT:    lsr r6, r0, r6
+; V7A-NEXT:    lsr r4, r3, r2
+; V7A-NEXT:    lsrpl r0, r0, r1
+; V7A-NEXT:    movwpl r6, #0
+; V7A-NEXT:    subs r12, r2, #32
+; V7A-NEXT:    movwpl r4, #0
+; V7A-NEXT:    and r1, r6, r4
+; V7A-NEXT:    lsr r6, r8, r2
+; V7A-NEXT:    rsb r2, r2, #32
+; V7A-NEXT:    orr r2, r6, r3, lsl r2
+; V7A-NEXT:    lsrpl r2, r3, r12
+; V7A-NEXT:    and r0, r0, r2
+; V7A-NEXT:    pop {r4, r6, r8, pc}
+;
+; V7A-T-LABEL: bextr64_c2_load:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    ldrd r0, r3, [r0]
+; V7A-T-NEXT:    rsb.w r1, r2, #32
+; V7A-T-NEXT:    ldr.w r12, [sp]
+; V7A-T-NEXT:    lsl.w r1, r3, r1
+; V7A-T-NEXT:    lsrs r0, r2
+; V7A-T-NEXT:    orrs r0, r1
+; V7A-T-NEXT:    subs.w r1, r2, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r3, r1
+; V7A-T-NEXT:    lsr.w r1, r3, r2
+; V7A-T-NEXT:    rsb.w r2, r12, #64
+; V7A-T-NEXT:    mov.w r3, #-1
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r1, #0
+; V7A-T-NEXT:    rsbs.w r12, r12, #32
+; V7A-T-NEXT:    lsr.w r2, r3, r2
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r2, #0
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r3, r3, r12
+; V7A-T-NEXT:    ands r1, r2
+; V7A-T-NEXT:    ands r0, r3
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr64_c2_load:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r7, lr}
+; V6M-NEXT:    push {r4, r5, r7, lr}
+; V6M-NEXT:    ldr r3, [r0]
+; V6M-NEXT:    ldr r1, [r0, #4]
+; V6M-NEXT:    mov r0, r3
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    mov r5, r0
+; V6M-NEXT:    mov r4, r1
+; V6M-NEXT:    ldr r0, [sp, #16]
+; V6M-NEXT:    movs r1, #64
+; V6M-NEXT:    subs r2, r1, r0
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    mov r1, r0
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    ands r0, r5
+; V6M-NEXT:    ands r1, r4
+; V6M-NEXT:    pop {r4, r5, r7, pc}
+  %val = load i64, ptr %w
+  %shifted = lshr i64 %val, %numskipbits
+  %numhighbits = sub i64 64, %numlowbits
+  %mask = lshr i64 -1, %numhighbits
+  %masked = and i64 %mask, %shifted
+  ret i64 %masked
+}
+
+define i64 @bextr64_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_c3_load_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    ldrd r0, r3, [r0]
+; CHECK-NEXT:    uxtb r1, r1
+; CHECK-NEXT:    lsr.w r12, r0, r1
+; CHECK-NEXT:    rsb.w r0, r1, #32
+; CHECK-NEXT:    lsl.w r0, r3, r0
+; CHECK-NEXT:    orr.w r12, r12, r0
+; CHECK-NEXT:    subs.w r0, r1, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r12, r3, r0
+; CHECK-NEXT:    rsb.w r0, r2, #64
+; CHECK-NEXT:    lsr.w r1, r3, r1
+; CHECK-NEXT:    mov.w r3, #-1
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    uxtb r0, r0
+; CHECK-NEXT:    subs.w lr, r0, #32
+; CHECK-NEXT:    lsr.w r2, r3, r0
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r3, r3, lr
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r2, #0
+; CHECK-NEXT:    and.w r0, r3, r12
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    pop {r7, pc}
+;
+; V7A-LABEL: bextr64_c3_load_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r4, lr}
+; V7A-NEXT:    push {r4, lr}
+; V7A-NEXT:    ldr r4, [r0]
+; V7A-NEXT:    ldr r3, [r0, #4]
+; V7A-NEXT:    uxtb r0, r1
+; V7A-NEXT:    lsr r12, r4, r0
+; V7A-NEXT:    rsb r4, r0, #32
+; V7A-NEXT:    lsr r0, r3, r0
+; V7A-NEXT:    orr lr, r12, r3, lsl r4
+; V7A-NEXT:    mvn r12, #31
+; V7A-NEXT:    uxtab r1, r12, r1
+; V7A-NEXT:    cmp r1, #0
+; V7A-NEXT:    lsrpl lr, r3, r1
+; V7A-NEXT:    rsb r1, r2, #64
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    movwpl r0, #0
+; V7A-NEXT:    uxtb r2, r1
+; V7A-NEXT:    uxtab r4, r12, r1
+; V7A-NEXT:    lsr r2, r3, r2
+; V7A-NEXT:    cmp r4, #0
+; V7A-NEXT:    movwpl r2, #0
+; V7A-NEXT:    and r1, r2, r0
+; V7A-NEXT:    lsrpl r3, r3, r4
+; V7A-NEXT:    and r0, r3, lr
+; V7A-NEXT:    pop {r4, pc}
+;
+; V7A-T-LABEL: bextr64_c3_load_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r4, r5, r7, lr}
+; V7A-T-NEXT:    push {r4, r5, r7, lr}
+; V7A-T-NEXT:    ldrd r12, lr, [r0]
+; V7A-T-NEXT:    uxtb r0, r1
+; V7A-T-NEXT:    rsb.w r3, r0, #32
+; V7A-T-NEXT:    lsl.w r4, lr, r3
+; V7A-T-NEXT:    lsr.w r3, r12, r0
+; V7A-T-NEXT:    orr.w r5, r3, r4
+; V7A-T-NEXT:    mvn r12, #31
+; V7A-T-NEXT:    uxtab r1, r12, r1
+; V7A-T-NEXT:    lsr.w r0, lr, r0
+; V7A-T-NEXT:    cmp r1, #0
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r5, lr, r1
+; V7A-T-NEXT:    rsb.w r1, r2, #64
+; V7A-T-NEXT:    mov.w r4, #-1
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r0, #0
+; V7A-T-NEXT:    uxtb r2, r1
+; V7A-T-NEXT:    uxtab r3, r12, r1
+; V7A-T-NEXT:    lsr.w r2, r4, r2
+; V7A-T-NEXT:    cmp r3, #0
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r2, #0
+; V7A-T-NEXT:    and.w r1, r2, r0
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl r4, r3
+; V7A-T-NEXT:    and.w r0, r4, r5
+; V7A-T-NEXT:    pop {r4, r5, r7, pc}
+;
+; V6M-LABEL: bextr64_c3_load_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r6, lr}
+; V6M-NEXT:    push {r4, r5, r6, lr}
+; V6M-NEXT:    mov r5, r2
+; V6M-NEXT:    ldr r4, [r0]
+; V6M-NEXT:    ldr r3, [r0, #4]
+; V6M-NEXT:    uxtb r2, r1
+; V6M-NEXT:    mov r0, r4
+; V6M-NEXT:    mov r1, r3
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    mov r6, r0
+; V6M-NEXT:    mov r4, r1
+; V6M-NEXT:    movs r0, #64
+; V6M-NEXT:    subs r0, r0, r5
+; V6M-NEXT:    uxtb r2, r0
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    mov r1, r0
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    ands r0, r6
+; V6M-NEXT:    ands r1, r4
+; V6M-NEXT:    pop {r4, r5, r6, pc}
+  %val = load i64, ptr %w
+  %skip = zext i8 %numskipbits to i64
+  %shifted = lshr i64 %val, %skip
+  %numhighbits = sub i8 64, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i64
+  %mask = lshr i64 -1, %sh_prom
+  %masked = and i64 %mask, %shifted
+  ret i64 %masked
+}
+
+define i64 @bextr64_c4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_c4_commutative:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    rsb.w r3, r2, #32
+; CHECK-NEXT:    lsrs r0, r2
+; CHECK-NEXT:    ldr.w r12, [sp]
+; CHECK-NEXT:    lsl.w r3, r1, r3
+; CHECK-NEXT:    orrs r0, r3
+; CHECK-NEXT:    subs.w r3, r2, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r1, r3
+; CHECK-NEXT:    rsb.w r3, r12, #64
+; CHECK-NEXT:    lsr.w r1, r1, r2
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    lsr.w r3, r2, r3
+; CHECK-NEXT:    rsbs.w r12, r12, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r3, #0
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r2, r2, r12
+; CHECK-NEXT:    ands r1, r3
+; CHECK-NEXT:    ands r0, r2
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr64_c4_commutative:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r4, r5, r11, lr}
+; V7A-NEXT:    push {r4, r5, r11, lr}
+; V7A-NEXT:    ldr r12, [sp, #16]
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    lsr r5, r1, r2
+; V7A-NEXT:    lsr r0, r0, r2
+; V7A-NEXT:    rsb r4, r12, #64
+; V7A-NEXT:    rsbs lr, r12, #32
+; V7A-NEXT:    lsr r4, r3, r4
+; V7A-NEXT:    lsrpl r3, r3, lr
+; V7A-NEXT:    movwpl r4, #0
+; V7A-NEXT:    subs lr, r2, #32
+; V7A-NEXT:    rsb r2, r2, #32
+; V7A-NEXT:    movwpl r5, #0
+; V7A-NEXT:    and r12, r5, r4
+; V7A-NEXT:    orr r0, r0, r1, lsl r2
+; V7A-NEXT:    lsrpl r0, r1, lr
+; V7A-NEXT:    mov r1, r12
+; V7A-NEXT:    and r0, r0, r3
+; V7A-NEXT:    pop {r4, r5, r11, pc}
+;
+; V7A-T-LABEL: bextr64_c4_commutative:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r7, lr}
+; V7A-T-NEXT:    push {r7, lr}
+; V7A-T-NEXT:    rsb.w r3, r2, #32
+; V7A-T-NEXT:    lsrs r0, r2
+; V7A-T-NEXT:    ldr.w r12, [sp, #8]
+; V7A-T-NEXT:    mov.w lr, #-1
+; V7A-T-NEXT:    lsl.w r3, r1, r3
+; V7A-T-NEXT:    orrs r0, r3
+; V7A-T-NEXT:    subs.w r3, r2, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r1, r3
+; V7A-T-NEXT:    lsr.w r1, r1, r2
+; V7A-T-NEXT:    mov.w r3, #-1
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r1, #0
+; V7A-T-NEXT:    rsbs.w r2, r12, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl r3, r2
+; V7A-T-NEXT:    rsb.w r2, r12, #64
+; V7A-T-NEXT:    and.w r0, r0, r3
+; V7A-T-NEXT:    lsr.w r2, lr, r2
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r2, #0
+; V7A-T-NEXT:    ands r1, r2
+; V7A-T-NEXT:    pop {r7, pc}
+;
+; V6M-LABEL: bextr64_c4_commutative:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r7, lr}
+; V6M-NEXT:    push {r4, r5, r7, lr}
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    mov r5, r0
+; V6M-NEXT:    mov r4, r1
+; V6M-NEXT:    ldr r0, [sp, #16]
+; V6M-NEXT:    movs r1, #64
+; V6M-NEXT:    subs r2, r1, r0
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    mov r1, r0
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    ands r0, r5
+; V6M-NEXT:    ands r1, r4
+; V6M-NEXT:    pop {r4, r5, r7, pc}
+  %shifted = lshr i64 %val, %numskipbits
+  %numhighbits = sub i64 64, %numlowbits
+  %mask = lshr i64 -1, %numhighbits
+  %masked = and i64 %shifted, %mask ; swapped order
+  ret i64 %masked
+}
+
+; 64-bit, but with 32-bit output
+
+; Everything is done in 64-bit; truncation happens last.
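+; A rough C model of this variant (illustrative; assumes numlowbits in [1, 64]):
+;   uint32_t bextr64_32_c0(uint64_t val, uint64_t skip, uint64_t low) {
+;     uint64_t mask = ~0ULL >> (64 - low);     // mask built in 64-bit
+;     return (uint32_t)(mask & (val >> skip)); // truncate as the last step
+;   }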
+define i32 @bextr64_32_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_32_c0:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    rsb.w r3, r2, #32
+; CHECK-NEXT:    lsrs r0, r2
+; CHECK-NEXT:    subs r2, #32
+; CHECK-NEXT:    lsl.w r3, r1, r3
+; CHECK-NEXT:    orr.w r0, r0, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r1, r2
+; CHECK-NEXT:    ldr r1, [sp]
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    rsbs.w r1, r1, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl r2, r1
+; CHECK-NEXT:    ands r0, r2
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr64_32_c0:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ldr r3, [sp]
+; V7A-NEXT:    rsbs r12, r3, #32
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    lsrpl r3, r3, r12
+; V7A-NEXT:    lsr r12, r0, r2
+; V7A-NEXT:    rsb r0, r2, #32
+; V7A-NEXT:    subs r2, r2, #32
+; V7A-NEXT:    orr r0, r12, r1, lsl r0
+; V7A-NEXT:    lsrpl r0, r1, r2
+; V7A-NEXT:    and r0, r3, r0
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr64_32_c0:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    rsb.w r3, r2, #32
+; V7A-T-NEXT:    lsrs r0, r2
+; V7A-T-NEXT:    ldr.w r12, [sp]
+; V7A-T-NEXT:    subs r2, #32
+; V7A-T-NEXT:    lsl.w r3, r1, r3
+; V7A-T-NEXT:    orr.w r0, r0, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r1, r2
+; V7A-T-NEXT:    mov.w r2, #-1
+; V7A-T-NEXT:    rsbs.w r1, r12, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl r2, r1
+; V7A-T-NEXT:    ands r0, r2
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr64_32_c0:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, lr}
+; V6M-NEXT:    push {r4, lr}
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    mov r4, r0
+; V6M-NEXT:    ldr r0, [sp, #8]
+; V6M-NEXT:    movs r1, #64
+; V6M-NEXT:    subs r2, r1, r0
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    mov r1, r0
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    ands r0, r4
+; V6M-NEXT:    pop {r4, pc}
+  %shifted = lshr i64 %val, %numskipbits
+  %numhighbits = sub i64 64, %numlowbits
+  %mask = lshr i64 -1, %numhighbits
+  %masked = and i64 %mask, %shifted
+  %res = trunc i64 %masked to i32
+  ret i32 %res
+}
+
+; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
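+; A rough C model (illustrative; assumes numlowbits in [1, 32]):
+;   uint32_t bextr64_32_c1(uint64_t val, uint64_t skip, uint32_t low) {
+;     uint32_t mask = ~0u >> (32 - low);     // mask built in 32-bit
+;     return mask & (uint32_t)(val >> skip); // truncate first, then mask
+;   }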
+define i32 @bextr64_32_c1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_32_c1:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    rsb.w r3, r2, #32
+; CHECK-NEXT:    lsrs r0, r2
+; CHECK-NEXT:    subs r2, #32
+; CHECK-NEXT:    lsl.w r3, r1, r3
+; CHECK-NEXT:    orr.w r0, r0, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r1, r2
+; CHECK-NEXT:    ldr r1, [sp]
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    rsb.w r1, r1, #32
+; CHECK-NEXT:    lsr.w r1, r2, r1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr64_32_c1:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    rsb r3, r2, #32
+; V7A-NEXT:    lsr r0, r0, r2
+; V7A-NEXT:    ldr r12, [sp]
+; V7A-NEXT:    subs r2, r2, #32
+; V7A-NEXT:    orr r0, r0, r1, lsl r3
+; V7A-NEXT:    lsrpl r0, r1, r2
+; V7A-NEXT:    rsb r1, r12, #32
+; V7A-NEXT:    mvn r2, #0
+; V7A-NEXT:    and r0, r0, r2, lsr r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr64_32_c1:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    rsb.w r3, r2, #32
+; V7A-T-NEXT:    lsrs r0, r2
+; V7A-T-NEXT:    ldr.w r12, [sp]
+; V7A-T-NEXT:    subs r2, #32
+; V7A-T-NEXT:    lsl.w r3, r1, r3
+; V7A-T-NEXT:    orr.w r0, r0, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r1, r2
+; V7A-T-NEXT:    rsb.w r1, r12, #32
+; V7A-T-NEXT:    mov.w r2, #-1
+; V7A-T-NEXT:    lsr.w r1, r2, r1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr64_32_c1:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r7, lr}
+; V6M-NEXT:    push {r7, lr}
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    ldr r1, [sp, #8]
+; V6M-NEXT:    movs r2, #32
+; V6M-NEXT:    subs r1, r2, r1
+; V6M-NEXT:    movs r2, #0
+; V6M-NEXT:    mvns r2, r2
+; V6M-NEXT:    lsrs r2, r1
+; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    pop {r7, pc}
+  %shifted = lshr i64 %val, %numskipbits
+  %truncshifted = trunc i64 %shifted to i32
+  %numhighbits = sub i32 32, %numlowbits
+  %mask = lshr i32 -1, %numhighbits
+  %masked = and i32 %mask, %truncshifted
+  ret i32 %masked
+}
+
+; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit.
+; Masking is 64-bit. Then truncation.
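+; A rough C model (illustrative; assumes numlowbits in [1, 32]):
+;   uint32_t bextr64_32_c2(uint64_t val, uint64_t skip, uint32_t low) {
+;     uint64_t mask = (uint64_t)(~0u >> (32 - low)); // zext the 32-bit mask
+;     return (uint32_t)(mask & (val >> skip));       // 64-bit AND, truncate
+;   }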
+define i32 @bextr64_32_c2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_32_c2:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    rsb.w r3, r2, #32
+; CHECK-NEXT:    lsrs r0, r2
+; CHECK-NEXT:    subs r2, #32
+; CHECK-NEXT:    lsl.w r3, r1, r3
+; CHECK-NEXT:    orr.w r0, r0, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r1, r2
+; CHECK-NEXT:    ldr r1, [sp]
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    rsb.w r1, r1, #32
+; CHECK-NEXT:    lsr.w r1, r2, r1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr64_32_c2:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    rsb r3, r2, #32
+; V7A-NEXT:    lsr r0, r0, r2
+; V7A-NEXT:    ldr r12, [sp]
+; V7A-NEXT:    subs r2, r2, #32
+; V7A-NEXT:    orr r0, r0, r1, lsl r3
+; V7A-NEXT:    lsrpl r0, r1, r2
+; V7A-NEXT:    rsb r1, r12, #32
+; V7A-NEXT:    mvn r2, #0
+; V7A-NEXT:    and r0, r0, r2, lsr r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr64_32_c2:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    rsb.w r3, r2, #32
+; V7A-T-NEXT:    lsrs r0, r2
+; V7A-T-NEXT:    ldr.w r12, [sp]
+; V7A-T-NEXT:    subs r2, #32
+; V7A-T-NEXT:    lsl.w r3, r1, r3
+; V7A-T-NEXT:    orr.w r0, r0, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r1, r2
+; V7A-T-NEXT:    rsb.w r1, r12, #32
+; V7A-T-NEXT:    mov.w r2, #-1
+; V7A-T-NEXT:    lsr.w r1, r2, r1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr64_32_c2:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r7, lr}
+; V6M-NEXT:    push {r7, lr}
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    ldr r1, [sp, #8]
+; V6M-NEXT:    movs r2, #32
+; V6M-NEXT:    subs r1, r2, r1
+; V6M-NEXT:    movs r2, #0
+; V6M-NEXT:    mvns r2, r2
+; V6M-NEXT:    lsrs r2, r1
+; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    pop {r7, pc}
+  %shifted = lshr i64 %val, %numskipbits
+  %numhighbits = sub i32 32, %numlowbits
+  %mask = lshr i32 -1, %numhighbits
+  %zextmask = zext i32 %mask to i64
+  %masked = and i64 %zextmask, %shifted
+  %truncmasked = trunc i64 %masked to i32
+  ret i32 %truncmasked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Pattern d. 32-bit.
+; ---------------------------------------------------------------------------- ;
+
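+; Pattern d clears the high bits with a shift pair instead of an AND: shift
+; left so the unwanted high bits fall off, then shift back right. A minimal C
+; sketch (illustrative; assumes numlowbits in [1, 32]):
+;   uint32_t bextr32_d(uint32_t val, uint32_t skip, uint32_t low) {
+;     uint32_t high = 32 - low; // number of high bits to clear
+;     return ((val >> skip) << high) >> high;
+;   }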
+define i32 @bextr32_d0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bextr32_d0:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    rsb.w r1, r2, #32
+; CHECK-NEXT:    lsls r0, r1
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr32_d0:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    rsb r1, r2, #32
+; V7A-NEXT:    lsl r0, r0, r1
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr32_d0:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    rsb.w r1, r2, #32
+; V7A-T-NEXT:    lsls r0, r1
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr32_d0:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r3, #32
+; V6M-NEXT:    subs r2, r3, r2
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    lsls r0, r2
+; V6M-NEXT:    lsrs r0, r2
+; V6M-NEXT:    bx lr
+  %shifted = lshr i32 %val, %numskipbits
+  %numhighbits = sub i32 32, %numlowbits
+  %highbitscleared = shl i32 %shifted, %numhighbits
+  %masked = lshr i32 %highbitscleared, %numhighbits
+  ret i32 %masked
+}
+
+define i32 @bextr32_d1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bextr32_d1_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    uxtb r1, r1
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    rsb.w r1, r2, #32
+; CHECK-NEXT:    uxtb r1, r1
+; CHECK-NEXT:    lsls r0, r1
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr32_d1_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    uxtb r1, r1
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    rsb r1, r2, #32
+; V7A-NEXT:    uxtb r1, r1
+; V7A-NEXT:    lsl r0, r0, r1
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr32_d1_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    uxtb r1, r1
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    rsb.w r1, r2, #32
+; V7A-T-NEXT:    uxtb r1, r1
+; V7A-T-NEXT:    lsls r0, r1
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr32_d1_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    uxtb r1, r1
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    movs r1, #32
+; V6M-NEXT:    subs r1, r1, r2
+; V6M-NEXT:    uxtb r1, r1
+; V6M-NEXT:    lsls r0, r1
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    bx lr
+  %skip = zext i8 %numskipbits to i32
+  %shifted = lshr i32 %val, %skip
+  %numhighbits = sub i8 32, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i32
+  %highbitscleared = shl i32 %shifted, %sh_prom
+  %masked = lshr i32 %highbitscleared, %sh_prom
+  ret i32 %masked
+}
+
+define i32 @bextr32_d2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bextr32_d2_load:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    rsb.w r1, r2, #32
+; CHECK-NEXT:    lsls r0, r1
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr32_d2_load:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ldr r0, [r0]
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    rsb r1, r2, #32
+; V7A-NEXT:    lsl r0, r0, r1
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr32_d2_load:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    ldr r0, [r0]
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    rsb.w r1, r2, #32
+; V7A-T-NEXT:    lsls r0, r1
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr32_d2_load:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r3, #32
+; V6M-NEXT:    subs r2, r3, r2
+; V6M-NEXT:    ldr r0, [r0]
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    lsls r0, r2
+; V6M-NEXT:    lsrs r0, r2
+; V6M-NEXT:    bx lr
+  %val = load i32, ptr %w
+  %shifted = lshr i32 %val, %numskipbits
+  %numhighbits = sub i32 32, %numlowbits
+  %highbitscleared = shl i32 %shifted, %numhighbits
+  %masked = lshr i32 %highbitscleared, %numhighbits
+  ret i32 %masked
+}
+
+define i32 @bextr32_d3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bextr32_d3_load_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    uxtb r1, r1
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    rsb.w r1, r2, #32
+; CHECK-NEXT:    uxtb r1, r1
+; CHECK-NEXT:    lsls r0, r1
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr32_d3_load_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ldr r0, [r0]
+; V7A-NEXT:    uxtb r1, r1
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    rsb r1, r2, #32
+; V7A-NEXT:    uxtb r1, r1
+; V7A-NEXT:    lsl r0, r0, r1
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr32_d3_load_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    ldr r0, [r0]
+; V7A-T-NEXT:    uxtb r1, r1
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    rsb.w r1, r2, #32
+; V7A-T-NEXT:    uxtb r1, r1
+; V7A-T-NEXT:    lsls r0, r1
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr32_d3_load_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    uxtb r1, r1
+; V6M-NEXT:    ldr r0, [r0]
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    movs r1, #32
+; V6M-NEXT:    subs r1, r1, r2
+; V6M-NEXT:    uxtb r1, r1
+; V6M-NEXT:    lsls r0, r1
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    bx lr
+  %val = load i32, ptr %w
+  %skip = zext i8 %numskipbits to i32
+  %shifted = lshr i32 %val, %skip
+  %numhighbits = sub i8 32, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i32
+  %highbitscleared = shl i32 %shifted, %sh_prom
+  %masked = lshr i32 %highbitscleared, %sh_prom
+  ret i32 %masked
+}
+
+; 64-bit.
+
+define i64 @bextr64_d0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_d0:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    rsb.w r3, r2, #32
+; CHECK-NEXT:    ldr.w r12, [sp, #8]
+; CHECK-NEXT:    lsrs r0, r2
+; CHECK-NEXT:    lsl.w r3, r1, r3
+; CHECK-NEXT:    orrs r0, r3
+; CHECK-NEXT:    subs.w r3, r2, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r1, r3
+; CHECK-NEXT:    lsr.w r1, r1, r2
+; CHECK-NEXT:    rsb.w r3, r12, #64
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    rsb.w lr, r12, #32
+; CHECK-NEXT:    rsb.w r12, r3, #32
+; CHECK-NEXT:    lsls r1, r3
+; CHECK-NEXT:    cmp.w lr, #0
+; CHECK-NEXT:    lsr.w r4, r0, r12
+; CHECK-NEXT:    orr.w r1, r1, r4
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r1, r0, lr
+; CHECK-NEXT:    lsl.w r0, r0, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r0, #0
+; CHECK-NEXT:    lsl.w r2, r1, r12
+; CHECK-NEXT:    lsr.w r0, r0, r3
+; CHECK-NEXT:    orr.w r0, r0, r2
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r1, lr
+; CHECK-NEXT:    lsr.w r1, r1, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    pop {r4, pc}
+;
+; V7A-LABEL: bextr64_d0:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r11, lr}
+; V7A-NEXT:    push {r11, lr}
+; V7A-NEXT:    lsr r3, r1, r2
+; V7A-NEXT:    subs lr, r2, #32
+; V7A-NEXT:    lsr r0, r0, r2
+; V7A-NEXT:    rsb r2, r2, #32
+; V7A-NEXT:    ldr r12, [sp, #8]
+; V7A-NEXT:    movwpl r3, #0
+; V7A-NEXT:    orr r0, r0, r1, lsl r2
+; V7A-NEXT:    lsrpl r0, r1, lr
+; V7A-NEXT:    rsb r1, r12, #64
+; V7A-NEXT:    rsb lr, r1, #32
+; V7A-NEXT:    lsr r2, r0, lr
+; V7A-NEXT:    orr r2, r2, r3, lsl r1
+; V7A-NEXT:    rsbs r3, r12, #32
+; V7A-NEXT:    lslpl r2, r0, r3
+; V7A-NEXT:    lsl r0, r0, r1
+; V7A-NEXT:    movwpl r0, #0
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    lsr r1, r2, r1
+; V7A-NEXT:    orr r0, r0, r2, lsl lr
+; V7A-NEXT:    movwpl r1, #0
+; V7A-NEXT:    lsrpl r0, r2, r3
+; V7A-NEXT:    pop {r11, pc}
+;
+; V7A-T-LABEL: bextr64_d0:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r4, lr}
+; V7A-T-NEXT:    push {r4, lr}
+; V7A-T-NEXT:    rsb.w r3, r2, #32
+; V7A-T-NEXT:    ldr.w r12, [sp, #8]
+; V7A-T-NEXT:    lsrs r0, r2
+; V7A-T-NEXT:    lsl.w r3, r1, r3
+; V7A-T-NEXT:    orrs r0, r3
+; V7A-T-NEXT:    subs.w r3, r2, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r1, r3
+; V7A-T-NEXT:    lsr.w r1, r1, r2
+; V7A-T-NEXT:    rsb.w r3, r12, #64
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r1, #0
+; V7A-T-NEXT:    rsb.w lr, r3, #32
+; V7A-T-NEXT:    lsls r1, r3
+; V7A-T-NEXT:    rsbs.w r2, r12, #32
+; V7A-T-NEXT:    lsr.w r4, r0, lr
+; V7A-T-NEXT:    orr.w r1, r1, r4
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl.w r1, r0, r2
+; V7A-T-NEXT:    lsl.w r0, r0, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r0, #0
+; V7A-T-NEXT:    lsl.w r4, r1, lr
+; V7A-T-NEXT:    lsr.w r0, r0, r3
+; V7A-T-NEXT:    orr.w r0, r0, r4
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r1, r2
+; V7A-T-NEXT:    lsr.w r1, r1, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r1, #0
+; V7A-T-NEXT:    pop {r4, pc}
+;
+; V6M-LABEL: bextr64_d0:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, lr}
+; V6M-NEXT:    push {r4, lr}
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    ldr r2, [sp, #8]
+; V6M-NEXT:    movs r3, #64
+; V6M-NEXT:    subs r4, r3, r2
+; V6M-NEXT:    mov r2, r4
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    mov r2, r4
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    pop {r4, pc}
+  %shifted = lshr i64 %val, %numskipbits
+  %numhighbits = sub i64 64, %numlowbits
+  %highbitscleared = shl i64 %shifted, %numhighbits
+  %masked = lshr i64 %highbitscleared, %numhighbits
+  ret i64 %masked
+}
+
+define i64 @bextr64_d1_indexzext(i64 %val, i8 %numskipbits, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_d1_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    uxtb.w lr, r2
+; CHECK-NEXT:    subs.w r2, lr, #32
+; CHECK-NEXT:    lsr.w r12, r0, lr
+; CHECK-NEXT:    rsb.w r0, lr, #32
+; CHECK-NEXT:    lsl.w r0, r1, r0
+; CHECK-NEXT:    orr.w r0, r0, r12
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r1, r2
+; CHECK-NEXT:    rsb.w r2, r3, #64
+; CHECK-NEXT:    lsr.w r1, r1, lr
+; CHECK-NEXT:    uxtb r2, r2
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    rsb.w r12, r2, #32
+; CHECK-NEXT:    lsls r1, r2
+; CHECK-NEXT:    sub.w r3, r2, #32
+; CHECK-NEXT:    lsr.w r4, r0, r12
+; CHECK-NEXT:    orrs r1, r4
+; CHECK-NEXT:    cmp r3, #0
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r1, r0, r3
+; CHECK-NEXT:    lsl.w r0, r0, r2
+; CHECK-NEXT:    lsl.w r4, r1, r12
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r0, #0
+; CHECK-NEXT:    lsr.w r0, r0, r2
+; CHECK-NEXT:    orr.w r0, r0, r4
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r1, r3
+; CHECK-NEXT:    lsr.w r1, r1, r2
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    pop {r4, pc}
+;
+; V7A-LABEL: bextr64_d1_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r4, r5, r11, lr}
+; V7A-NEXT:    push {r4, r5, r11, lr}
+; V7A-NEXT:    uxtb r12, r2
+; V7A-NEXT:    lsr lr, r0, r12
+; V7A-NEXT:    rsb r0, r12, #32
+; V7A-NEXT:    orr r0, lr, r1, lsl r0
+; V7A-NEXT:    mvn lr, #31
+; V7A-NEXT:    uxtab r2, lr, r2
+; V7A-NEXT:    cmp r2, #0
+; V7A-NEXT:    lsrpl r0, r1, r2
+; V7A-NEXT:    rsb r2, r3, #64
+; V7A-NEXT:    lsr r1, r1, r12
+; V7A-NEXT:    uxtb r3, r2
+; V7A-NEXT:    rsb r4, r3, #32
+; V7A-NEXT:    movwpl r1, #0
+; V7A-NEXT:    uxtab r2, lr, r2
+; V7A-NEXT:    lsr r5, r0, r4
+; V7A-NEXT:    orr r1, r5, r1, lsl r3
+; V7A-NEXT:    cmp r2, #0
+; V7A-NEXT:    lslpl r1, r0, r2
+; V7A-NEXT:    lsl r0, r0, r3
+; V7A-NEXT:    movwpl r0, #0
+; V7A-NEXT:    lsr r0, r0, r3
+; V7A-NEXT:    orr r0, r0, r1, lsl r4
+; V7A-NEXT:    lsrpl r0, r1, r2
+; V7A-NEXT:    lsr r1, r1, r3
+; V7A-NEXT:    movwpl r1, #0
+; V7A-NEXT:    pop {r4, r5, r11, pc}
+;
+; V7A-T-LABEL: bextr64_d1_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r4, r5, r6, r7, lr}
+; V7A-T-NEXT:    push {r4, r5, r6, r7, lr}
+; V7A-T-NEXT:    uxtb.w r12, r2
+; V7A-T-NEXT:    rsb.w r6, r12, #32
+; V7A-T-NEXT:    rsb.w r3, r3, #64
+; V7A-T-NEXT:    lsr.w r0, r0, r12
+; V7A-T-NEXT:    mvn r7, #31
+; V7A-T-NEXT:    uxtab r2, r7, r2
+; V7A-T-NEXT:    lsl.w r6, r1, r6
+; V7A-T-NEXT:    lsr.w lr, r1, r12
+; V7A-T-NEXT:    orrs r0, r6
+; V7A-T-NEXT:    cmp r2, #0
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl.w lr, #0
+; V7A-T-NEXT:    uxtb r5, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r1, r2
+; V7A-T-NEXT:    rsb.w r1, r5, #32
+; V7A-T-NEXT:    uxtab r3, r7, r3
+; V7A-T-NEXT:    lsl.w r4, lr, r5
+; V7A-T-NEXT:    lsr.w r2, r0, r1
+; V7A-T-NEXT:    cmp r3, #0
+; V7A-T-NEXT:    orr.w r2, r2, r4
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl.w r2, r0, r3
+; V7A-T-NEXT:    lsl.w r0, r0, r5
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r0, #0
+; V7A-T-NEXT:    lsl.w r1, r2, r1
+; V7A-T-NEXT:    lsr.w r0, r0, r5
+; V7A-T-NEXT:    orr.w r0, r0, r1
+; V7A-T-NEXT:    lsr.w r1, r2, r5
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r2, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r1, #0
+; V7A-T-NEXT:    pop {r4, r5, r6, r7, pc}
+;
+; V6M-LABEL: bextr64_d1_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, lr}
+; V6M-NEXT:    push {r4, lr}
+; V6M-NEXT:    mov r4, r3
+; V6M-NEXT:    uxtb r2, r2
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    movs r2, #64
+; V6M-NEXT:    subs r2, r2, r4
+; V6M-NEXT:    uxtb r4, r2
+; V6M-NEXT:    mov r2, r4
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    mov r2, r4
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    pop {r4, pc}
+  %skip = zext i8 %numskipbits to i64
+  %shifted = lshr i64 %val, %skip
+  %numhighbits = sub i8 64, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i64
+  %highbitscleared = shl i64 %shifted, %sh_prom
+  %masked = lshr i64 %highbitscleared, %sh_prom
+  ret i64 %masked
+}
+
+define i64 @bextr64_d2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_d2_load:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    ldrd r0, r3, [r0]
+; CHECK-NEXT:    rsb.w r1, r2, #32
+; CHECK-NEXT:    ldr.w r12, [sp, #8]
+; CHECK-NEXT:    lsl.w r1, r3, r1
+; CHECK-NEXT:    lsrs r0, r2
+; CHECK-NEXT:    rsb.w lr, r12, #32
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    subs.w r1, r2, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r3, r1
+; CHECK-NEXT:    rsb.w r1, r12, #64
+; CHECK-NEXT:    lsr.w r2, r3, r2
+; CHECK-NEXT:    rsb.w r12, r1, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r2, #0
+; CHECK-NEXT:    cmp.w lr, #0
+; CHECK-NEXT:    lsl.w r2, r2, r1
+; CHECK-NEXT:    lsr.w r4, r0, r12
+; CHECK-NEXT:    orr.w r2, r2, r4
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r2, r0, lr
+; CHECK-NEXT:    lsl.w r0, r0, r1
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r0, #0
+; CHECK-NEXT:    lsl.w r3, r2, r12
+; CHECK-NEXT:    lsr.w r0, r0, r1
+; CHECK-NEXT:    lsr.w r1, r2, r1
+; CHECK-NEXT:    orr.w r0, r0, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r2, lr
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    pop {r4, pc}
+;
+; V7A-LABEL: bextr64_d2_load:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r11, lr}
+; V7A-NEXT:    push {r11, lr}
+; V7A-NEXT:    ldrd r0, r1, [r0]
+; V7A-NEXT:    subs lr, r2, #32
+; V7A-NEXT:    lsr r3, r1, r2
+; V7A-NEXT:    ldr r12, [sp, #8]
+; V7A-NEXT:    movwpl r3, #0
+; V7A-NEXT:    lsr r0, r0, r2
+; V7A-NEXT:    rsb r2, r2, #32
+; V7A-NEXT:    orr r0, r0, r1, lsl r2
+; V7A-NEXT:    lsrpl r0, r1, lr
+; V7A-NEXT:    rsb r1, r12, #64
+; V7A-NEXT:    rsb lr, r1, #32
+; V7A-NEXT:    lsr r2, r0, lr
+; V7A-NEXT:    orr r2, r2, r3, lsl r1
+; V7A-NEXT:    rsbs r3, r12, #32
+; V7A-NEXT:    lslpl r2, r0, r3
+; V7A-NEXT:    lsl r0, r0, r1
+; V7A-NEXT:    movwpl r0, #0
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    lsr r1, r2, r1
+; V7A-NEXT:    orr r0, r0, r2, lsl lr
+; V7A-NEXT:    movwpl r1, #0
+; V7A-NEXT:    lsrpl r0, r2, r3
+; V7A-NEXT:    pop {r11, pc}
+;
+; V7A-T-LABEL: bextr64_d2_load:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r4, lr}
+; V7A-T-NEXT:    push {r4, lr}
+; V7A-T-NEXT:    ldrd r0, r3, [r0]
+; V7A-T-NEXT:    rsb.w r1, r2, #32
+; V7A-T-NEXT:    ldr.w r12, [sp, #8]
+; V7A-T-NEXT:    lsl.w r1, r3, r1
+; V7A-T-NEXT:    lsrs r0, r2
+; V7A-T-NEXT:    orrs r0, r1
+; V7A-T-NEXT:    subs.w r1, r2, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r3, r1
+; V7A-T-NEXT:    lsr.w r2, r3, r2
+; V7A-T-NEXT:    rsb.w r1, r12, #64
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r2, #0
+; V7A-T-NEXT:    rsb.w lr, r1, #32
+; V7A-T-NEXT:    rsbs.w r3, r12, #32
+; V7A-T-NEXT:    lsl.w r2, r2, r1
+; V7A-T-NEXT:    lsr.w r4, r0, lr
+; V7A-T-NEXT:    orr.w r2, r2, r4
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl.w r2, r0, r3
+; V7A-T-NEXT:    lsl.w r0, r0, r1
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r0, #0
+; V7A-T-NEXT:    lsl.w r4, r2, lr
+; V7A-T-NEXT:    lsr.w r0, r0, r1
+; V7A-T-NEXT:    lsr.w r1, r2, r1
+; V7A-T-NEXT:    orr.w r0, r0, r4
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r2, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r1, #0
+; V7A-T-NEXT:    pop {r4, pc}
+;
+; V6M-LABEL: bextr64_d2_load:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, lr}
+; V6M-NEXT:    push {r4, lr}
+; V6M-NEXT:    ldr r3, [r0]
+; V6M-NEXT:    ldr r1, [r0, #4]
+; V6M-NEXT:    mov r0, r3
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    ldr r2, [sp, #8]
+; V6M-NEXT:    movs r3, #64
+; V6M-NEXT:    subs r4, r3, r2
+; V6M-NEXT:    mov r2, r4
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    mov r2, r4
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    pop {r4, pc}
+  %val = load i64, ptr %w
+  %shifted = lshr i64 %val, %numskipbits
+  %numhighbits = sub i64 64, %numlowbits
+  %highbitscleared = shl i64 %shifted, %numhighbits
+  %masked = lshr i64 %highbitscleared, %numhighbits
+  ret i64 %masked
+}
+
+define i64 @bextr64_d3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_d3_load_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    ldrd r0, lr, [r0]
+; CHECK-NEXT:    uxtb r1, r1
+; CHECK-NEXT:    rsb.w r2, r2, #64
+; CHECK-NEXT:    subs.w r3, r1, #32
+; CHECK-NEXT:    lsr.w r12, r0, r1
+; CHECK-NEXT:    rsb.w r0, r1, #32
+; CHECK-NEXT:    lsr.w r1, lr, r1
+; CHECK-NEXT:    uxtb r2, r2
+; CHECK-NEXT:    lsl.w r0, lr, r0
+; CHECK-NEXT:    orr.w r0, r0, r12
+; CHECK-NEXT:    rsb.w r12, r2, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, lr, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    lsls r1, r2
+; CHECK-NEXT:    sub.w r3, r2, #32
+; CHECK-NEXT:    lsr.w r4, r0, r12
+; CHECK-NEXT:    orrs r1, r4
+; CHECK-NEXT:    cmp r3, #0
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r1, r0, r3
+; CHECK-NEXT:    lsl.w r0, r0, r2
+; CHECK-NEXT:    lsl.w r4, r1, r12
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r0, #0
+; CHECK-NEXT:    lsr.w r0, r0, r2
+; CHECK-NEXT:    orr.w r0, r0, r4
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r1, r3
+; CHECK-NEXT:    lsr.w r1, r1, r2
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    pop {r4, pc}
+;
+; V7A-LABEL: bextr64_d3_load_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r4, r5, r11, lr}
+; V7A-NEXT:    push {r4, r5, r11, lr}
+; V7A-NEXT:    ldr r4, [r0]
+; V7A-NEXT:    ldr r3, [r0, #4]
+; V7A-NEXT:    uxtb r0, r1
+; V7A-NEXT:    lsr r12, r4, r0
+; V7A-NEXT:    rsb r4, r0, #32
+; V7A-NEXT:    lsr r0, r3, r0
+; V7A-NEXT:    orr r4, r12, r3, lsl r4
+; V7A-NEXT:    mvn r12, #31
+; V7A-NEXT:    uxtab r1, r12, r1
+; V7A-NEXT:    cmp r1, #0
+; V7A-NEXT:    lsrpl r4, r3, r1
+; V7A-NEXT:    rsb r1, r2, #64
+; V7A-NEXT:    movwpl r0, #0
+; V7A-NEXT:    uxtb r2, r1
+; V7A-NEXT:    rsb lr, r2, #32
+; V7A-NEXT:    uxtab r1, r12, r1
+; V7A-NEXT:    lsr r5, r4, lr
+; V7A-NEXT:    orr r3, r5, r0, lsl r2
+; V7A-NEXT:    cmp r1, #0
+; V7A-NEXT:    lsl r0, r4, r2
+; V7A-NEXT:    movwpl r0, #0
+; V7A-NEXT:    lslpl r3, r4, r1
+; V7A-NEXT:    lsr r0, r0, r2
+; V7A-NEXT:    orr r0, r0, r3, lsl lr
+; V7A-NEXT:    lsrpl r0, r3, r1
+; V7A-NEXT:    lsr r1, r3, r2
+; V7A-NEXT:    movwpl r1, #0
+; V7A-NEXT:    pop {r4, r5, r11, pc}
+;
+; V7A-T-LABEL: bextr64_d3_load_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r4, r5, r6, lr}
+; V7A-T-NEXT:    push {r4, r5, r6, lr}
+; V7A-T-NEXT:    ldrd r12, lr, [r0]
+; V7A-T-NEXT:    uxtb r0, r1
+; V7A-T-NEXT:    rsb.w r6, r0, #32
+; V7A-T-NEXT:    lsr.w r3, lr, r0
+; V7A-T-NEXT:    rsb.w r2, r2, #64
+; V7A-T-NEXT:    mvn r4, #31
+; V7A-T-NEXT:    lsr.w r0, r12, r0
+; V7A-T-NEXT:    uxtab r1, r4, r1
+; V7A-T-NEXT:    lsl.w r6, lr, r6
+; V7A-T-NEXT:    orrs r0, r6
+; V7A-T-NEXT:    cmp r1, #0
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r3, #0
+; V7A-T-NEXT:    uxtb r5, r2
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, lr, r1
+; V7A-T-NEXT:    rsb.w r1, r5, #32
+; V7A-T-NEXT:    lsls r3, r5
+; V7A-T-NEXT:    uxtab r2, r4, r2
+; V7A-T-NEXT:    lsr.w r6, r0, r1
+; V7A-T-NEXT:    orrs r3, r6
+; V7A-T-NEXT:    cmp r2, #0
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl.w r3, r0, r2
+; V7A-T-NEXT:    lsl.w r0, r0, r5
+; V7A-T-NEXT:    lsl.w r1, r3, r1
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r0, #0
+; V7A-T-NEXT:    lsr.w r0, r0, r5
+; V7A-T-NEXT:    orr.w r0, r0, r1
+; V7A-T-NEXT:    lsr.w r1, r3, r5
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r3, r2
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r1, #0
+; V7A-T-NEXT:    pop {r4, r5, r6, pc}
+;
+; V6M-LABEL: bextr64_d3_load_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r7, lr}
+; V6M-NEXT:    push {r4, r5, r7, lr}
+; V6M-NEXT:    mov r4, r2
+; V6M-NEXT:    ldr r5, [r0]
+; V6M-NEXT:    ldr r3, [r0, #4]
+; V6M-NEXT:    uxtb r2, r1
+; V6M-NEXT:    mov r0, r5
+; V6M-NEXT:    mov r1, r3
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    movs r2, #64
+; V6M-NEXT:    subs r2, r2, r4
+; V6M-NEXT:    uxtb r4, r2
+; V6M-NEXT:    mov r2, r4
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    mov r2, r4
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    pop {r4, r5, r7, pc}
+  %val = load i64, ptr %w
+  %skip = zext i8 %numskipbits to i64
+  %shifted = lshr i64 %val, %skip
+  %numhighbits = sub i8 64, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i64
+  %highbitscleared = shl i64 %shifted, %sh_prom
+  %masked = lshr i64 %highbitscleared, %sh_prom
+  ret i64 %masked
+}
+
+; 64-bit, but with 32-bit output
+
+; Everything done in 64-bit, truncation happens last.
+define i32 @bextr64_32_d0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_32_d0:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    rsb.w r3, r2, #32
+; CHECK-NEXT:    ldr.w r12, [sp, #8]
+; CHECK-NEXT:    lsrs r0, r2
+; CHECK-NEXT:    lsl.w r3, r1, r3
+; CHECK-NEXT:    orrs r0, r3
+; CHECK-NEXT:    subs.w r3, r2, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r1, r3
+; CHECK-NEXT:    lsr.w r1, r1, r2
+; CHECK-NEXT:    rsb.w r3, r12, #64
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    rsb.w lr, r12, #32
+; CHECK-NEXT:    rsb.w r12, r3, #32
+; CHECK-NEXT:    lsls r1, r3
+; CHECK-NEXT:    cmp.w lr, #0
+; CHECK-NEXT:    lsr.w r4, r0, r12
+; CHECK-NEXT:    orr.w r1, r1, r4
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r1, r0, lr
+; CHECK-NEXT:    lsl.w r0, r0, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r0, #0
+; CHECK-NEXT:    lsl.w r2, r1, r12
+; CHECK-NEXT:    lsr.w r0, r0, r3
+; CHECK-NEXT:    orr.w r0, r0, r2
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r1, lr
+; CHECK-NEXT:    pop {r4, pc}
+;
+; V7A-LABEL: bextr64_32_d0:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r11, lr}
+; V7A-NEXT:    push {r11, lr}
+; V7A-NEXT:    lsr r3, r1, r2
+; V7A-NEXT:    subs lr, r2, #32
+; V7A-NEXT:    lsr r0, r0, r2
+; V7A-NEXT:    rsb r2, r2, #32
+; V7A-NEXT:    ldr r12, [sp, #8]
+; V7A-NEXT:    movwpl r3, #0
+; V7A-NEXT:    orr r0, r0, r1, lsl r2
+; V7A-NEXT:    lsrpl r0, r1, lr
+; V7A-NEXT:    rsb r1, r12, #64
+; V7A-NEXT:    rsb lr, r1, #32
+; V7A-NEXT:    lsr r2, r0, lr
+; V7A-NEXT:    orr r2, r2, r3, lsl r1
+; V7A-NEXT:    rsbs r3, r12, #32
+; V7A-NEXT:    lslpl r2, r0, r3
+; V7A-NEXT:    lsl r0, r0, r1
+; V7A-NEXT:    movwpl r0, #0
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    orr r0, r0, r2, lsl lr
+; V7A-NEXT:    lsrpl r0, r2, r3
+; V7A-NEXT:    pop {r11, pc}
+;
+; V7A-T-LABEL: bextr64_32_d0:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r4, lr}
+; V7A-T-NEXT:    push {r4, lr}
+; V7A-T-NEXT:    rsb.w r3, r2, #32
+; V7A-T-NEXT:    ldr.w r12, [sp, #8]
+; V7A-T-NEXT:    lsrs r0, r2
+; V7A-T-NEXT:    lsl.w r3, r1, r3
+; V7A-T-NEXT:    orrs r0, r3
+; V7A-T-NEXT:    subs.w r3, r2, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r1, r3
+; V7A-T-NEXT:    lsr.w r1, r1, r2
+; V7A-T-NEXT:    rsb.w r3, r12, #64
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r1, #0
+; V7A-T-NEXT:    rsb.w lr, r3, #32
+; V7A-T-NEXT:    lsls r1, r3
+; V7A-T-NEXT:    rsbs.w r2, r12, #32
+; V7A-T-NEXT:    lsr.w r4, r0, lr
+; V7A-T-NEXT:    orr.w r1, r1, r4
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl.w r1, r0, r2
+; V7A-T-NEXT:    lsl.w r0, r0, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r0, #0
+; V7A-T-NEXT:    lsl.w r4, r1, lr
+; V7A-T-NEXT:    lsr.w r0, r0, r3
+; V7A-T-NEXT:    orr.w r0, r0, r4
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r1, r2
+; V7A-T-NEXT:    pop {r4, pc}
+;
+; V6M-LABEL: bextr64_32_d0:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, lr}
+; V6M-NEXT:    push {r4, lr}
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    ldr r2, [sp, #8]
+; V6M-NEXT:    movs r3, #64
+; V6M-NEXT:    subs r4, r3, r2
+; V6M-NEXT:    mov r2, r4
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    mov r2, r4
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    pop {r4, pc}
+  %shifted = lshr i64 %val, %numskipbits
+  %numhighbits = sub i64 64, %numlowbits
+  %highbitscleared = shl i64 %shifted, %numhighbits
+  %masked = lshr i64 %highbitscleared, %numhighbits
+  %res = trunc i64 %masked to i32
+  ret i32 %res
+}
+
+; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
+define i32 @bextr64_32_d1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_32_d1:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    rsb.w r3, r2, #32
+; CHECK-NEXT:    lsrs r0, r2
+; CHECK-NEXT:    subs r2, #32
+; CHECK-NEXT:    lsl.w r3, r1, r3
+; CHECK-NEXT:    orr.w r0, r0, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r1, r2
+; CHECK-NEXT:    ldr r1, [sp]
+; CHECK-NEXT:    rsb.w r1, r1, #32
+; CHECK-NEXT:    lsls r0, r1
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bextr64_32_d1:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    rsb r3, r2, #32
+; V7A-NEXT:    lsr r0, r0, r2
+; V7A-NEXT:    ldr r12, [sp]
+; V7A-NEXT:    subs r2, r2, #32
+; V7A-NEXT:    orr r0, r0, r1, lsl r3
+; V7A-NEXT:    lsrpl r0, r1, r2
+; V7A-NEXT:    rsb r1, r12, #32
+; V7A-NEXT:    lsl r0, r0, r1
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr64_32_d1:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    rsb.w r3, r2, #32
+; V7A-T-NEXT:    lsrs r0, r2
+; V7A-T-NEXT:    ldr.w r12, [sp]
+; V7A-T-NEXT:    subs r2, #32
+; V7A-T-NEXT:    lsl.w r3, r1, r3
+; V7A-T-NEXT:    orr.w r0, r0, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r1, r2
+; V7A-T-NEXT:    rsb.w r1, r12, #32
+; V7A-T-NEXT:    lsls r0, r1
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr64_32_d1:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r7, lr}
+; V6M-NEXT:    push {r7, lr}
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    ldr r1, [sp, #8]
+; V6M-NEXT:    movs r2, #32
+; V6M-NEXT:    subs r1, r2, r1
+; V6M-NEXT:    lsls r0, r1
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    pop {r7, pc}
+  %shifted = lshr i64 %val, %numskipbits
+  %truncshifted = trunc i64 %shifted to i32
+  %numhighbits = sub i32 32, %numlowbits
+  %highbitscleared = shl i32 %truncshifted, %numhighbits
+  %masked = lshr i32 %highbitscleared, %numhighbits
+  ret i32 %masked
+}
+
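+; As a rough illustration (a sketch, not an autogenerated assertion), the two
+; d-variants above compute:
+;   d0: (i32)(((val >> skip) << (64 - low)) >> (64 - low))   ; mask in 64 bits, then truncate
+;   d1: ((i32)(val >> skip) << (32 - low)) >> (32 - low)     ; truncate, then mask in 32 bits
+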
+; ---------------------------------------------------------------------------- ;
+; Constant
+; ---------------------------------------------------------------------------- ;
+
+; https://bugs.llvm.org/show_bug.cgi?id=38938
+define void @pr38938(ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: pr38938:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ldr r1, [r1]
+; CHECK-NEXT:    ubfx r1, r1, #21, #10
+; CHECK-NEXT:    ldr.w r2, [r0, r1, lsl #2]
+; CHECK-NEXT:    adds r2, #1
+; CHECK-NEXT:    str.w r2, [r0, r1, lsl #2]
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: pr38938:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ldr r1, [r1]
+; V7A-NEXT:    ubfx r1, r1, #21, #10
+; V7A-NEXT:    ldr r2, [r0, r1, lsl #2]
+; V7A-NEXT:    add r2, r2, #1
+; V7A-NEXT:    str r2, [r0, r1, lsl #2]
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: pr38938:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    ldr r1, [r1]
+; V7A-T-NEXT:    ubfx r1, r1, #21, #10
+; V7A-T-NEXT:    ldr.w r2, [r0, r1, lsl #2]
+; V7A-T-NEXT:    adds r2, #1
+; V7A-T-NEXT:    str.w r2, [r0, r1, lsl #2]
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: pr38938:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    ldr r1, [r1]
+; V6M-NEXT:    lsrs r1, r1, #19
+; V6M-NEXT:    ldr r2, .LCPI51_0
+; V6M-NEXT:    ands r2, r1
+; V6M-NEXT:    ldr r1, [r0, r2]
+; V6M-NEXT:    adds r1, r1, #1
+; V6M-NEXT:    str r1, [r0, r2]
+; V6M-NEXT:    bx lr
+; V6M-NEXT:    .p2align 2
+; V6M-NEXT:  @ %bb.1:
+; V6M-NEXT:  .LCPI51_0:
+; V6M-NEXT:    .long 4092 @ 0xffc
+  %tmp = load i64, ptr %a1, align 8
+  %tmp1 = lshr i64 %tmp, 21
+  %tmp2 = and i64 %tmp1, 1023
+  %tmp3 = getelementptr inbounds i32, ptr %a0, i64 %tmp2
+  %tmp4 = load i32, ptr %tmp3, align 4
+  %tmp5 = add nsw i32 %tmp4, 1
+  store i32 %tmp5, ptr %tmp3, align 4
+  ret void
+}
+
+; The most canonical variant
+define i32 @c0_i32(i32 %arg) nounwind {
+; CHECK-LABEL: c0_i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ubfx r0, r0, #19, #10
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: c0_i32:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ubfx r0, r0, #19, #10
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: c0_i32:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    ubfx r0, r0, #19, #10
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: c0_i32:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    lsls r0, r0, #3
+; V6M-NEXT:    lsrs r0, r0, #22
+; V6M-NEXT:    bx lr
+  %tmp0 = lshr i32 %arg, 19
+  %tmp1 = and i32 %tmp0, 1023
+  ret i32 %tmp1
+}
+
+; Should still be fine, but the mask is shifted
+define i32 @c1_i32(i32 %arg) nounwind {
+; CHECK-LABEL: c1_i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movw r1, #4092
+; CHECK-NEXT:    and.w r0, r1, r0, lsr #19
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: c1_i32:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    movw r1, #4092
+; V7A-NEXT:    and r0, r1, r0, lsr #19
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: c1_i32:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    movw r1, #4092
+; V7A-T-NEXT:    and.w r0, r1, r0, lsr #19
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: c1_i32:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    lsrs r1, r0, #19
+; V6M-NEXT:    ldr r0, .LCPI53_0
+; V6M-NEXT:    ands r0, r1
+; V6M-NEXT:    bx lr
+; V6M-NEXT:    .p2align 2
+; V6M-NEXT:  @ %bb.1:
+; V6M-NEXT:  .LCPI53_0:
+; V6M-NEXT:    .long 4092 @ 0xffc
+  %tmp0 = lshr i32 %arg, 19
+  %tmp1 = and i32 %tmp0, 4092
+  ret i32 %tmp1
+}
+
+; Should still be fine, but the result is shifted left afterwards
+define i32 @c2_i32(i32 %arg) nounwind {
+; CHECK-LABEL: c2_i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movw r1, #4092
+; CHECK-NEXT:    and.w r0, r1, r0, lsr #17
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: c2_i32:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    movw r1, #4092
+; V7A-NEXT:    and r0, r1, r0, lsr #17
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: c2_i32:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    movw r1, #4092
+; V7A-T-NEXT:    and.w r0, r1, r0, lsr #17
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: c2_i32:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    lsrs r1, r0, #17
+; V6M-NEXT:    ldr r0, .LCPI54_0
+; V6M-NEXT:    ands r0, r1
+; V6M-NEXT:    bx lr
+; V6M-NEXT:    .p2align 2
+; V6M-NEXT:  @ %bb.1:
+; V6M-NEXT:  .LCPI54_0:
+; V6M-NEXT:    .long 4092 @ 0xffc
+  %tmp0 = lshr i32 %arg, 19
+  %tmp1 = and i32 %tmp0, 1023
+  %tmp2 = shl i32 %tmp1, 2
+  ret i32 %tmp2
+}
+
+; The mask covers the newly shifted-in bit
+define i32 @c4_i32_bad(i32 %arg) nounwind {
+; CHECK-LABEL: c4_i32_bad:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mvn r1, #1
+; CHECK-NEXT:    and.w r0, r1, r0, lsr #19
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: c4_i32_bad:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    mvn r1, #1
+; V7A-NEXT:    and r0, r1, r0, lsr #19
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: c4_i32_bad:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    mvn r1, #1
+; V7A-T-NEXT:    and.w r0, r1, r0, lsr #19
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: c4_i32_bad:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    lsrs r0, r0, #20
+; V6M-NEXT:    lsls r0, r0, #1
+; V6M-NEXT:    bx lr
+  %tmp0 = lshr i32 %arg, 19
+  %tmp1 = and i32 %tmp0, 16382
+  ret i32 %tmp1
+}
+
+; i64
+
+; The most canonical variant
+define i64 @c0_i64(i64 %arg) nounwind {
+; CHECK-LABEL: c0_i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ubfx r0, r1, #19, #10
+; CHECK-NEXT:    movs r1, #0
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: c0_i64:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ubfx r0, r1, #19, #10
+; V7A-NEXT:    mov r1, #0
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: c0_i64:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    ubfx r0, r1, #19, #10
+; V7A-T-NEXT:    movs r1, #0
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: c0_i64:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    lsls r0, r1, #3
+; V6M-NEXT:    lsrs r0, r0, #22
+; V6M-NEXT:    movs r1, #0
+; V6M-NEXT:    bx lr
+  %tmp0 = lshr i64 %arg, 51
+  %tmp1 = and i64 %tmp0, 1023
+  ret i64 %tmp1
+}
+
+; Should still be fine, but the mask is shifted
+define i64 @c1_i64(i64 %arg) nounwind {
+; CHECK-LABEL: c1_i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movw r0, #4092
+; CHECK-NEXT:    and.w r0, r0, r1, lsr #19
+; CHECK-NEXT:    movs r1, #0
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: c1_i64:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    movw r0, #4092
+; V7A-NEXT:    and r0, r0, r1, lsr #19
+; V7A-NEXT:    mov r1, #0
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: c1_i64:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    movw r0, #4092
+; V7A-T-NEXT:    and.w r0, r0, r1, lsr #19
+; V7A-T-NEXT:    movs r1, #0
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: c1_i64:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    lsrs r1, r1, #19
+; V6M-NEXT:    ldr r0, .LCPI57_0
+; V6M-NEXT:    ands r0, r1
+; V6M-NEXT:    movs r1, #0
+; V6M-NEXT:    bx lr
+; V6M-NEXT:    .p2align 2
+; V6M-NEXT:  @ %bb.1:
+; V6M-NEXT:  .LCPI57_0:
+; V6M-NEXT:    .long 4092 @ 0xffc
+  %tmp0 = lshr i64 %arg, 51
+  %tmp1 = and i64 %tmp0, 4092
+  ret i64 %tmp1
+}
+
+; Should still be fine, but the result is shifted left afterwards
+define i64 @c2_i64(i64 %arg) nounwind {
+; CHECK-LABEL: c2_i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movw r0, #4092
+; CHECK-NEXT:    and.w r0, r0, r1, lsr #17
+; CHECK-NEXT:    movs r1, #0
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: c2_i64:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    movw r0, #4092
+; V7A-NEXT:    and r0, r0, r1, lsr #17
+; V7A-NEXT:    mov r1, #0
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: c2_i64:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    movw r0, #4092
+; V7A-T-NEXT:    and.w r0, r0, r1, lsr #17
+; V7A-T-NEXT:    movs r1, #0
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: c2_i64:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    lsrs r1, r1, #17
+; V6M-NEXT:    ldr r0, .LCPI58_0
+; V6M-NEXT:    ands r0, r1
+; V6M-NEXT:    movs r1, #0
+; V6M-NEXT:    bx lr
+; V6M-NEXT:    .p2align 2
+; V6M-NEXT:  @ %bb.1:
+; V6M-NEXT:  .LCPI58_0:
+; V6M-NEXT:    .long 4092 @ 0xffc
+  %tmp0 = lshr i64 %arg, 51
+  %tmp1 = and i64 %tmp0, 1023
+  %tmp2 = shl i64 %tmp1, 2
+  ret i64 %tmp2
+}
+
+; The mask covers the newly shifted-in bit
+define i64 @c4_i64_bad(i64 %arg) nounwind {
+; CHECK-LABEL: c4_i64_bad:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mvn r0, #1
+; CHECK-NEXT:    and.w r0, r0, r1, lsr #19
+; CHECK-NEXT:    movs r1, #0
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: c4_i64_bad:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    mvn r0, #1
+; V7A-NEXT:    and r0, r0, r1, lsr #19
+; V7A-NEXT:    mov r1, #0
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: c4_i64_bad:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    mvn r0, #1
+; V7A-T-NEXT:    and.w r0, r0, r1, lsr #19
+; V7A-T-NEXT:    movs r1, #0
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: c4_i64_bad:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    lsrs r0, r1, #20
+; V6M-NEXT:    lsls r0, r0, #1
+; V6M-NEXT:    movs r1, #0
+; V6M-NEXT:    bx lr
+  %tmp0 = lshr i64 %arg, 51
+  %tmp1 = and i64 %tmp0, 16382
+  ret i64 %tmp1
+}
+
+; ---------------------------------------------------------------------------- ;
+; Constant, storing the result afterwards.
+; ---------------------------------------------------------------------------- ;
+
+; i32
+
+; The most canonical variant
+define void @c5_i32(i32 %arg, ptr %ptr) nounwind {
+; CHECK-LABEL: c5_i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ubfx r0, r0, #19, #10
+; CHECK-NEXT:    str r0, [r1]
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: c5_i32:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ubfx r0, r0, #19, #10
+; V7A-NEXT:    str r0, [r1]
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: c5_i32:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    ubfx r0, r0, #19, #10
+; V7A-T-NEXT:    str r0, [r1]
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: c5_i32:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    lsls r0, r0, #3
+; V6M-NEXT:    lsrs r0, r0, #22
+; V6M-NEXT:    str r0, [r1]
+; V6M-NEXT:    bx lr
+  %tmp0 = lshr i32 %arg, 19
+  %tmp1 = and i32 %tmp0, 1023
+  store i32 %tmp1, ptr %ptr
+  ret void
+}
+
+; Should still be fine, but the mask is shifted
+define void @c6_i32(i32 %arg, ptr %ptr) nounwind {
+; CHECK-LABEL: c6_i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ubfx r0, r0, #19, #12
+; CHECK-NEXT:    str r0, [r1]
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: c6_i32:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ubfx r0, r0, #19, #12
+; V7A-NEXT:    str r0, [r1]
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: c6_i32:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    ubfx r0, r0, #19, #12
+; V7A-T-NEXT:    str r0, [r1]
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: c6_i32:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    lsls r0, r0, #1
+; V6M-NEXT:    lsrs r0, r0, #20
+; V6M-NEXT:    str r0, [r1]
+; V6M-NEXT:    bx lr
+  %tmp0 = lshr i32 %arg, 19
+  %tmp1 = and i32 %tmp0, 4095
+  store i32 %tmp1, ptr %ptr
+  ret void
+}
+
+; Should still be fine, but the result is shifted left afterwards
+define void @c7_i32(i32 %arg, ptr %ptr) nounwind {
+; CHECK-LABEL: c7_i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movw r2, #4092
+; CHECK-NEXT:    and.w r0, r2, r0, lsr #17
+; CHECK-NEXT:    str r0, [r1]
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: c7_i32:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    movw r2, #4092
+; V7A-NEXT:    and r0, r2, r0, lsr #17
+; V7A-NEXT:    str r0, [r1]
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: c7_i32:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    movw r2, #4092
+; V7A-T-NEXT:    and.w r0, r2, r0, lsr #17
+; V7A-T-NEXT:    str r0, [r1]
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: c7_i32:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    lsrs r0, r0, #17
+; V6M-NEXT:    ldr r2, .LCPI62_0
+; V6M-NEXT:    ands r2, r0
+; V6M-NEXT:    str r2, [r1]
+; V6M-NEXT:    bx lr
+; V6M-NEXT:    .p2align 2
+; V6M-NEXT:  @ %bb.1:
+; V6M-NEXT:  .LCPI62_0:
+; V6M-NEXT:    .long 4092 @ 0xffc
+  %tmp0 = lshr i32 %arg, 19
+  %tmp1 = and i32 %tmp0, 1023
+  %tmp2 = shl i32 %tmp1, 2
+  store i32 %tmp2, ptr %ptr
+  ret void
+}
+
+; i64
+
+; The most canonical variant
+define void @c5_i64(i64 %arg, ptr %ptr) nounwind {
+; CHECK-LABEL: c5_i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movs r0, #0
+; CHECK-NEXT:    ubfx r1, r1, #19, #10
+; CHECK-NEXT:    strd r1, r0, [r2]
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: c5_i64:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    mov r0, #0
+; V7A-NEXT:    str r0, [r2, #4]
+; V7A-NEXT:    ubfx r0, r1, #19, #10
+; V7A-NEXT:    str r0, [r2]
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: c5_i64:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    movs r0, #0
+; V7A-T-NEXT:    ubfx r1, r1, #19, #10
+; V7A-T-NEXT:    strd r1, r0, [r2]
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: c5_i64:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    lsls r1, r1, #3
+; V6M-NEXT:    lsrs r1, r1, #22
+; V6M-NEXT:    str r1, [r2]
+; V6M-NEXT:    str r0, [r2, #4]
+; V6M-NEXT:    bx lr
+  %tmp0 = lshr i64 %arg, 51
+  %tmp1 = and i64 %tmp0, 1023
+  store i64 %tmp1, ptr %ptr
+  ret void
+}
+
+; Should still be fine, but the mask is shifted
+define void @c6_i64(i64 %arg, ptr %ptr) nounwind {
+; CHECK-LABEL: c6_i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movs r0, #0
+; CHECK-NEXT:    ubfx r1, r1, #19, #12
+; CHECK-NEXT:    strd r1, r0, [r2]
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: c6_i64:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    mov r0, #0
+; V7A-NEXT:    str r0, [r2, #4]
+; V7A-NEXT:    ubfx r0, r1, #19, #12
+; V7A-NEXT:    str r0, [r2]
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: c6_i64:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    movs r0, #0
+; V7A-T-NEXT:    ubfx r1, r1, #19, #12
+; V7A-T-NEXT:    strd r1, r0, [r2]
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: c6_i64:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    lsls r1, r1, #1
+; V6M-NEXT:    lsrs r1, r1, #20
+; V6M-NEXT:    str r1, [r2]
+; V6M-NEXT:    str r0, [r2, #4]
+; V6M-NEXT:    bx lr
+  %tmp0 = lshr i64 %arg, 51
+  %tmp1 = and i64 %tmp0, 4095
+  store i64 %tmp1, ptr %ptr
+  ret void
+}
+
+; Should still be fine, but the result is shifted left afterwards
+define void @c7_i64(i64 %arg, ptr %ptr) nounwind {
+; CHECK-LABEL: c7_i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movs r0, #0
+; CHECK-NEXT:    movw r3, #4092
+; CHECK-NEXT:    and.w r1, r3, r1, lsr #17
+; CHECK-NEXT:    strd r1, r0, [r2]
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: c7_i64:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    movw r0, #4092
+; V7A-NEXT:    mov r3, #0
+; V7A-NEXT:    and r0, r0, r1, lsr #17
+; V7A-NEXT:    stm r2, {r0, r3}
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: c7_i64:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    movs r0, #0
+; V7A-T-NEXT:    movw r3, #4092
+; V7A-T-NEXT:    and.w r1, r3, r1, lsr #17
+; V7A-T-NEXT:    strd r1, r0, [r2]
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: c7_i64:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    lsrs r1, r1, #17
+; V6M-NEXT:    ldr r3, .LCPI65_0
+; V6M-NEXT:    ands r3, r1
+; V6M-NEXT:    str r3, [r2]
+; V6M-NEXT:    str r0, [r2, #4]
+; V6M-NEXT:    bx lr
+; V6M-NEXT:    .p2align 2
+; V6M-NEXT:  @ %bb.1:
+; V6M-NEXT:  .LCPI65_0:
+; V6M-NEXT:    .long 4092 @ 0xffc
+  %tmp0 = lshr i64 %arg, 51
+  %tmp1 = and i64 %tmp0, 1023
+  %tmp2 = shl i64 %tmp1, 2
+  store i64 %tmp2, ptr %ptr
+  ret void
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; V7M: {{.*}}
diff --git a/llvm/test/CodeGen/ARM/extract-lowbits.ll b/llvm/test/CodeGen/ARM/extract-lowbits.ll
new file mode 100644
index 0000000000000..9bc8c638b63b7
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/extract-lowbits.ll
@@ -0,0 +1,2780 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv7m-eabi  %s -o - | FileCheck %s --check-prefix V7M
+; RUN: llc -mtriple=armv7a-eabi  %s -o -   | FileCheck %s --check-prefix V7A
+; RUN: llc -mtriple=thumbv7a-eabi  %s -o -   | FileCheck %s --check-prefix V7A-T
+; RUN: llc -mtriple=armv6m-eabi  %s -o -   | FileCheck %s --check-prefix V6M
+
+; *Please* keep in sync with test/CodeGen/X86/extract-lowbits.ll
+
+; https://bugs.llvm.org/show_bug.cgi?id=36419
+; https://bugs.llvm.org/show_bug.cgi?id=37603
+; https://bugs.llvm.org/show_bug.cgi?id=37610
+
+; Patterns:
+;   a) x &  (1 << nbits) - 1
+;   b) x & ~(-1 << nbits)
+;   c) x &  (-1 >> (32 - nbits))
+;   d) x << (32 - nbits) >> (32 - nbits)
+; are equivalent.
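+;
+; For instance (an illustrative sanity check, not part of the autogenerated
+; assertions), with x = 0xff and nbits = 4 every pattern yields 0xf:
+;   a) 0xff &  ((1 << 4) - 1)          = 0xff & 0x0000000f = 0xf
+;   b) 0xff & ~(-1 << 4)               = 0xff & 0x0000000f = 0xf
+;   c) 0xff &  (-1 >> (32 - 4))        = 0xff & 0x0000000f = 0xf
+;   d) (0xff << (32 - 4)) >> (32 - 4)  = 0xf (using a logical right shift)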
+
+; ---------------------------------------------------------------------------- ;
+; Pattern a. 32-bit
+; ---------------------------------------------------------------------------- ;
+
+define i32 @bzhi32_a0(i32 %val, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_a0:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movs r2, #1
+; CHECK-NEXT:    lsl.w r1, r2, r1
+; CHECK-NEXT:    subs r1, #1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi32_a0:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    mov r2, #1
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    add r1, r3, r2, lsl r1
+; V7A-NEXT:    and r0, r1, r0
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi32_a0:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    movs r2, #1
+; V7A-T-NEXT:    lsl.w r1, r2, r1
+; V7A-T-NEXT:    subs r1, #1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi32_a0:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r2, #1
+; V6M-NEXT:    lsls r2, r1
+; V6M-NEXT:    subs r1, r2, #1
+; V6M-NEXT:    ands r0, r1
+; V6M-NEXT:    bx lr
+  %onebit = shl i32 1, %numlowbits
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_a1_indexzext(i32 %val, i8 zeroext %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_a1_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movs r2, #1
+; CHECK-NEXT:    lsl.w r1, r2, r1
+; CHECK-NEXT:    subs r1, #1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi32_a1_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    mov r2, #1
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    add r1, r3, r2, lsl r1
+; V7A-NEXT:    and r0, r1, r0
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi32_a1_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    movs r2, #1
+; V7A-T-NEXT:    lsl.w r1, r2, r1
+; V7A-T-NEXT:    subs r1, #1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi32_a1_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r2, #1
+; V6M-NEXT:    lsls r2, r1
+; V6M-NEXT:    subs r1, r2, #1
+; V6M-NEXT:    ands r0, r1
+; V6M-NEXT:    bx lr
+  %conv = zext i8 %numlowbits to i32
+  %onebit = shl i32 1, %conv
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_a2_load(ptr %w, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_a2_load:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movs r2, #1
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    lsl.w r1, r2, r1
+; CHECK-NEXT:    subs r1, #1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi32_a2_load:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    mov r2, #1
+; V7A-NEXT:    ldr r0, [r0]
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    add r1, r3, r2, lsl r1
+; V7A-NEXT:    and r0, r1, r0
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi32_a2_load:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    movs r2, #1
+; V7A-T-NEXT:    ldr r0, [r0]
+; V7A-T-NEXT:    lsl.w r1, r2, r1
+; V7A-T-NEXT:    subs r1, #1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi32_a2_load:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r2, #1
+; V6M-NEXT:    lsls r2, r1
+; V6M-NEXT:    subs r1, r2, #1
+; V6M-NEXT:    ldr r0, [r0]
+; V6M-NEXT:    ands r0, r1
+; V6M-NEXT:    bx lr
+  %val = load i32, ptr %w
+  %onebit = shl i32 1, %numlowbits
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_a3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_a3_load_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movs r2, #1
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    lsl.w r1, r2, r1
+; CHECK-NEXT:    subs r1, #1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi32_a3_load_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    mov r2, #1
+; V7A-NEXT:    ldr r0, [r0]
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    add r1, r3, r2, lsl r1
+; V7A-NEXT:    and r0, r1, r0
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi32_a3_load_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    movs r2, #1
+; V7A-T-NEXT:    ldr r0, [r0]
+; V7A-T-NEXT:    lsl.w r1, r2, r1
+; V7A-T-NEXT:    subs r1, #1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi32_a3_load_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r2, #1
+; V6M-NEXT:    lsls r2, r1
+; V6M-NEXT:    subs r1, r2, #1
+; V6M-NEXT:    ldr r0, [r0]
+; V6M-NEXT:    ands r0, r1
+; V6M-NEXT:    bx lr
+  %val = load i32, ptr %w
+  %conv = zext i8 %numlowbits to i32
+  %onebit = shl i32 1, %conv
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_a4_commutative(i32 %val, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_a4_commutative:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movs r2, #1
+; CHECK-NEXT:    lsl.w r1, r2, r1
+; CHECK-NEXT:    subs r1, #1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi32_a4_commutative:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    mov r2, #1
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    add r1, r3, r2, lsl r1
+; V7A-NEXT:    and r0, r0, r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi32_a4_commutative:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    movs r2, #1
+; V7A-T-NEXT:    lsl.w r1, r2, r1
+; V7A-T-NEXT:    subs r1, #1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi32_a4_commutative:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r2, #1
+; V6M-NEXT:    lsls r2, r1
+; V6M-NEXT:    subs r1, r2, #1
+; V6M-NEXT:    ands r0, r1
+; V6M-NEXT:    bx lr
+  %onebit = shl i32 1, %numlowbits
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %val, %mask ; swapped order
+  ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @bzhi64_a0(i64 %val, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_a0:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    rsb.w r3, r2, #32
+; CHECK-NEXT:    mov.w r12, #1
+; CHECK-NEXT:    subs.w lr, r2, #32
+; CHECK-NEXT:    lsl.w r2, r12, r2
+; CHECK-NEXT:    lsr.w r3, r12, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r3, r12, lr
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r2, #0
+; CHECK-NEXT:    subs r2, #1
+; CHECK-NEXT:    sbc r3, r3, #0
+; CHECK-NEXT:    ands r0, r2
+; CHECK-NEXT:    ands r1, r3
+; CHECK-NEXT:    pop {r7, pc}
+;
+; V7A-LABEL: bzhi64_a0:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r11, lr}
+; V7A-NEXT:    push {r11, lr}
+; V7A-NEXT:    rsb r3, r2, #32
+; V7A-NEXT:    mov r12, #1
+; V7A-NEXT:    lsr lr, r12, r3
+; V7A-NEXT:    subs r3, r2, #32
+; V7A-NEXT:    lsl r2, r12, r2
+; V7A-NEXT:    movwpl r2, #0
+; V7A-NEXT:    lslpl lr, r12, r3
+; V7A-NEXT:    subs r2, r2, #1
+; V7A-NEXT:    sbc r3, lr, #0
+; V7A-NEXT:    and r0, r2, r0
+; V7A-NEXT:    and r1, r3, r1
+; V7A-NEXT:    pop {r11, pc}
+;
+; V7A-T-LABEL: bzhi64_a0:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r7, lr}
+; V7A-T-NEXT:    push {r7, lr}
+; V7A-T-NEXT:    rsb.w r3, r2, #32
+; V7A-T-NEXT:    mov.w r12, #1
+; V7A-T-NEXT:    subs.w lr, r2, #32
+; V7A-T-NEXT:    lsl.w r2, r12, r2
+; V7A-T-NEXT:    lsr.w r3, r12, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl.w r3, r12, lr
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r2, #0
+; V7A-T-NEXT:    subs r2, #1
+; V7A-T-NEXT:    sbc r3, r3, #0
+; V7A-T-NEXT:    ands r0, r2
+; V7A-T-NEXT:    ands r1, r3
+; V7A-T-NEXT:    pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_a0:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r6, lr}
+; V6M-NEXT:    push {r4, r5, r6, lr}
+; V6M-NEXT:    mov r5, r1
+; V6M-NEXT:    mov r4, r0
+; V6M-NEXT:    movs r0, #1
+; V6M-NEXT:    movs r6, #0
+; V6M-NEXT:    mov r1, r6
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    subs r0, r0, #1
+; V6M-NEXT:    sbcs r1, r6
+; V6M-NEXT:    ands r1, r5
+; V6M-NEXT:    ands r0, r4
+; V6M-NEXT:    pop {r4, r5, r6, pc}
+  %onebit = shl i64 1, %numlowbits
+  %mask = add nsw i64 %onebit, -1
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+; Check that we don't throw away the vreg_width-1 mask if not using shifts
+define i64 @bzhi64_a0_masked(i64 %val, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_a0_masked:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    and r2, r2, #63
+; CHECK-NEXT:    mov.w r12, #1
+; CHECK-NEXT:    rsb.w r3, r2, #32
+; CHECK-NEXT:    subs.w lr, r2, #32
+; CHECK-NEXT:    lsl.w r2, r12, r2
+; CHECK-NEXT:    lsr.w r3, r12, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r3, r12, lr
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r2, #0
+; CHECK-NEXT:    subs r2, #1
+; CHECK-NEXT:    sbc r3, r3, #0
+; CHECK-NEXT:    ands r0, r2
+; CHECK-NEXT:    ands r1, r3
+; CHECK-NEXT:    pop {r7, pc}
+;
+; V7A-LABEL: bzhi64_a0_masked:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r11, lr}
+; V7A-NEXT:    push {r11, lr}
+; V7A-NEXT:    and r2, r2, #63
+; V7A-NEXT:    mov r12, #1
+; V7A-NEXT:    rsb r3, r2, #32
+; V7A-NEXT:    lsr lr, r12, r3
+; V7A-NEXT:    subs r3, r2, #32
+; V7A-NEXT:    lsl r2, r12, r2
+; V7A-NEXT:    movwpl r2, #0
+; V7A-NEXT:    lslpl lr, r12, r3
+; V7A-NEXT:    subs r2, r2, #1
+; V7A-NEXT:    sbc r3, lr, #0
+; V7A-NEXT:    and r0, r2, r0
+; V7A-NEXT:    and r1, r3, r1
+; V7A-NEXT:    pop {r11, pc}
+;
+; V7A-T-LABEL: bzhi64_a0_masked:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r7, lr}
+; V7A-T-NEXT:    push {r7, lr}
+; V7A-T-NEXT:    and r2, r2, #63
+; V7A-T-NEXT:    mov.w r12, #1
+; V7A-T-NEXT:    rsb.w r3, r2, #32
+; V7A-T-NEXT:    subs.w lr, r2, #32
+; V7A-T-NEXT:    lsl.w r2, r12, r2
+; V7A-T-NEXT:    lsr.w r3, r12, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl.w r3, r12, lr
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r2, #0
+; V7A-T-NEXT:    subs r2, #1
+; V7A-T-NEXT:    sbc r3, r3, #0
+; V7A-T-NEXT:    ands r0, r2
+; V7A-T-NEXT:    ands r1, r3
+; V7A-T-NEXT:    pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_a0_masked:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r6, lr}
+; V6M-NEXT:    push {r4, r5, r6, lr}
+; V6M-NEXT:    mov r5, r1
+; V6M-NEXT:    mov r4, r0
+; V6M-NEXT:    movs r0, #63
+; V6M-NEXT:    ands r2, r0
+; V6M-NEXT:    movs r0, #1
+; V6M-NEXT:    movs r6, #0
+; V6M-NEXT:    mov r1, r6
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    subs r0, r0, #1
+; V6M-NEXT:    sbcs r1, r6
+; V6M-NEXT:    ands r1, r5
+; V6M-NEXT:    ands r0, r4
+; V6M-NEXT:    pop {r4, r5, r6, pc}
+  %numlowbits.masked = and i64 %numlowbits, 63
+  %onebit = shl i64 1, %numlowbits.masked
+  %mask = add nsw i64 %onebit, -1
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_a1_indexzext(i64 %val, i8 zeroext %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_a1_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    rsb.w r3, r2, #32
+; CHECK-NEXT:    mov.w r12, #1
+; CHECK-NEXT:    subs.w lr, r2, #32
+; CHECK-NEXT:    lsl.w r2, r12, r2
+; CHECK-NEXT:    lsr.w r3, r12, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r3, r12, lr
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r2, #0
+; CHECK-NEXT:    subs r2, #1
+; CHECK-NEXT:    sbc r3, r3, #0
+; CHECK-NEXT:    ands r0, r2
+; CHECK-NEXT:    ands r1, r3
+; CHECK-NEXT:    pop {r7, pc}
+;
+; V7A-LABEL: bzhi64_a1_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r11, lr}
+; V7A-NEXT:    push {r11, lr}
+; V7A-NEXT:    rsb r3, r2, #32
+; V7A-NEXT:    mov r12, #1
+; V7A-NEXT:    lsr lr, r12, r3
+; V7A-NEXT:    subs r3, r2, #32
+; V7A-NEXT:    lsl r2, r12, r2
+; V7A-NEXT:    movwpl r2, #0
+; V7A-NEXT:    lslpl lr, r12, r3
+; V7A-NEXT:    subs r2, r2, #1
+; V7A-NEXT:    sbc r3, lr, #0
+; V7A-NEXT:    and r0, r2, r0
+; V7A-NEXT:    and r1, r3, r1
+; V7A-NEXT:    pop {r11, pc}
+;
+; V7A-T-LABEL: bzhi64_a1_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r7, lr}
+; V7A-T-NEXT:    push {r7, lr}
+; V7A-T-NEXT:    rsb.w r3, r2, #32
+; V7A-T-NEXT:    mov.w r12, #1
+; V7A-T-NEXT:    subs.w lr, r2, #32
+; V7A-T-NEXT:    lsl.w r2, r12, r2
+; V7A-T-NEXT:    lsr.w r3, r12, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl.w r3, r12, lr
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r2, #0
+; V7A-T-NEXT:    subs r2, #1
+; V7A-T-NEXT:    sbc r3, r3, #0
+; V7A-T-NEXT:    ands r0, r2
+; V7A-T-NEXT:    ands r1, r3
+; V7A-T-NEXT:    pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_a1_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r6, lr}
+; V6M-NEXT:    push {r4, r5, r6, lr}
+; V6M-NEXT:    mov r5, r1
+; V6M-NEXT:    mov r4, r0
+; V6M-NEXT:    movs r0, #1
+; V6M-NEXT:    movs r6, #0
+; V6M-NEXT:    mov r1, r6
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    subs r0, r0, #1
+; V6M-NEXT:    sbcs r1, r6
+; V6M-NEXT:    ands r1, r5
+; V6M-NEXT:    ands r0, r4
+; V6M-NEXT:    pop {r4, r5, r6, pc}
+  %conv = zext i8 %numlowbits to i64
+  %onebit = shl i64 1, %conv
+  %mask = add nsw i64 %onebit, -1
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_a2_load(ptr %w, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_a2_load:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    rsb.w r1, r2, #32
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    subs.w r12, r2, #32
+; CHECK-NEXT:    lsl.w r2, r3, r2
+; CHECK-NEXT:    lsr.w r1, r3, r1
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r1, r3, r12
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r2, #0
+; CHECK-NEXT:    subs r2, #1
+; CHECK-NEXT:    ldrd r0, r3, [r0]
+; CHECK-NEXT:    sbc r1, r1, #0
+; CHECK-NEXT:    ands r1, r3
+; CHECK-NEXT:    ands r0, r2
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi64_a2_load:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r4, r6, r11, lr}
+; V7A-NEXT:    push {r4, r6, r11, lr}
+; V7A-NEXT:    ldr r6, [r0]
+; V7A-NEXT:    mov r1, #1
+; V7A-NEXT:    ldr r3, [r0, #4]
+; V7A-NEXT:    rsb r0, r2, #32
+; V7A-NEXT:    subs r4, r2, #32
+; V7A-NEXT:    lsr r0, r1, r0
+; V7A-NEXT:    lslpl r0, r1, r4
+; V7A-NEXT:    lsl r1, r1, r2
+; V7A-NEXT:    movwpl r1, #0
+; V7A-NEXT:    subs r2, r1, #1
+; V7A-NEXT:    sbc r0, r0, #0
+; V7A-NEXT:    and r1, r0, r3
+; V7A-NEXT:    and r0, r2, r6
+; V7A-NEXT:    pop {r4, r6, r11, pc}
+;
+; V7A-T-LABEL: bzhi64_a2_load:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r7, lr}
+; V7A-T-NEXT:    push {r7, lr}
+; V7A-T-NEXT:    rsb.w r3, r2, #32
+; V7A-T-NEXT:    movs r1, #1
+; V7A-T-NEXT:    ldrd r12, lr, [r0]
+; V7A-T-NEXT:    subs.w r0, r2, #32
+; V7A-T-NEXT:    lsr.w r3, r1, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl.w r3, r1, r0
+; V7A-T-NEXT:    lsl.w r0, r1, r2
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r0, #0
+; V7A-T-NEXT:    subs r0, #1
+; V7A-T-NEXT:    sbc r1, r3, #0
+; V7A-T-NEXT:    and.w r0, r0, r12
+; V7A-T-NEXT:    and.w r1, r1, lr
+; V7A-T-NEXT:    pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_a2_load:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r7, lr}
+; V6M-NEXT:    push {r4, r5, r7, lr}
+; V6M-NEXT:    mov r4, r0
+; V6M-NEXT:    movs r0, #1
+; V6M-NEXT:    movs r5, #0
+; V6M-NEXT:    mov r1, r5
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    subs r2, r0, #1
+; V6M-NEXT:    sbcs r1, r5
+; V6M-NEXT:    ldm r4!, {r0, r3}
+; V6M-NEXT:    ands r1, r3
+; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    pop {r4, r5, r7, pc}
+  %val = load i64, ptr %w
+  %onebit = shl i64 1, %numlowbits
+  %mask = add nsw i64 %onebit, -1
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_a3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_a3_load_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    rsb.w r2, r1, #32
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    subs.w r12, r1, #32
+; CHECK-NEXT:    lsl.w r1, r3, r1
+; CHECK-NEXT:    lsr.w r2, r3, r2
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r2, r3, r12
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    subs r3, r1, #1
+; CHECK-NEXT:    sbc r1, r2, #0
+; CHECK-NEXT:    ldrd r0, r2, [r0]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    ands r0, r3
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi64_a3_load_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r4, r6, r11, lr}
+; V7A-NEXT:    push {r4, r6, r11, lr}
+; V7A-NEXT:    ldr r6, [r0]
+; V7A-NEXT:    mov r2, #1
+; V7A-NEXT:    ldr r3, [r0, #4]
+; V7A-NEXT:    rsb r0, r1, #32
+; V7A-NEXT:    subs r4, r1, #32
+; V7A-NEXT:    lsl r1, r2, r1
+; V7A-NEXT:    lsr r0, r2, r0
+; V7A-NEXT:    movwpl r1, #0
+; V7A-NEXT:    lslpl r0, r2, r4
+; V7A-NEXT:    subs r2, r1, #1
+; V7A-NEXT:    sbc r0, r0, #0
+; V7A-NEXT:    and r1, r0, r3
+; V7A-NEXT:    and r0, r2, r6
+; V7A-NEXT:    pop {r4, r6, r11, pc}
+;
+; V7A-T-LABEL: bzhi64_a3_load_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r7, lr}
+; V7A-T-NEXT:    push {r7, lr}
+; V7A-T-NEXT:    rsb.w r3, r1, #32
+; V7A-T-NEXT:    movs r2, #1
+; V7A-T-NEXT:    ldrd r12, lr, [r0]
+; V7A-T-NEXT:    subs.w r0, r1, #32
+; V7A-T-NEXT:    lsr.w r3, r2, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl.w r3, r2, r0
+; V7A-T-NEXT:    lsl.w r0, r2, r1
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r0, #0
+; V7A-T-NEXT:    subs r0, #1
+; V7A-T-NEXT:    sbc r1, r3, #0
+; V7A-T-NEXT:    and.w r0, r0, r12
+; V7A-T-NEXT:    and.w r1, r1, lr
+; V7A-T-NEXT:    pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_a3_load_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r7, lr}
+; V6M-NEXT:    push {r4, r5, r7, lr}
+; V6M-NEXT:    mov r2, r1
+; V6M-NEXT:    mov r4, r0
+; V6M-NEXT:    movs r0, #1
+; V6M-NEXT:    movs r5, #0
+; V6M-NEXT:    mov r1, r5
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    subs r2, r0, #1
+; V6M-NEXT:    sbcs r1, r5
+; V6M-NEXT:    ldm r4!, {r0, r3}
+; V6M-NEXT:    ands r1, r3
+; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    pop {r4, r5, r7, pc}
+  %val = load i64, ptr %w
+  %conv = zext i8 %numlowbits to i64
+  %onebit = shl i64 1, %conv
+  %mask = add nsw i64 %onebit, -1
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_a4_commutative(i64 %val, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_a4_commutative:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    rsb.w r3, r2, #32
+; CHECK-NEXT:    mov.w r12, #1
+; CHECK-NEXT:    subs.w lr, r2, #32
+; CHECK-NEXT:    lsl.w r2, r12, r2
+; CHECK-NEXT:    lsr.w r3, r12, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r3, r12, lr
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r2, #0
+; CHECK-NEXT:    subs r2, #1
+; CHECK-NEXT:    sbc r3, r3, #0
+; CHECK-NEXT:    ands r0, r2
+; CHECK-NEXT:    ands r1, r3
+; CHECK-NEXT:    pop {r7, pc}
+;
+; V7A-LABEL: bzhi64_a4_commutative:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r11, lr}
+; V7A-NEXT:    push {r11, lr}
+; V7A-NEXT:    rsb r3, r2, #32
+; V7A-NEXT:    mov r12, #1
+; V7A-NEXT:    lsr lr, r12, r3
+; V7A-NEXT:    subs r3, r2, #32
+; V7A-NEXT:    lsl r2, r12, r2
+; V7A-NEXT:    movwpl r2, #0
+; V7A-NEXT:    lslpl lr, r12, r3
+; V7A-NEXT:    subs r2, r2, #1
+; V7A-NEXT:    sbc r3, lr, #0
+; V7A-NEXT:    and r0, r0, r2
+; V7A-NEXT:    and r1, r1, r3
+; V7A-NEXT:    pop {r11, pc}
+;
+; V7A-T-LABEL: bzhi64_a4_commutative:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r7, lr}
+; V7A-T-NEXT:    push {r7, lr}
+; V7A-T-NEXT:    rsb.w r3, r2, #32
+; V7A-T-NEXT:    mov.w r12, #1
+; V7A-T-NEXT:    subs.w lr, r2, #32
+; V7A-T-NEXT:    lsl.w r2, r12, r2
+; V7A-T-NEXT:    lsr.w r3, r12, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl.w r3, r12, lr
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r2, #0
+; V7A-T-NEXT:    subs r2, #1
+; V7A-T-NEXT:    sbc r3, r3, #0
+; V7A-T-NEXT:    ands r0, r2
+; V7A-T-NEXT:    ands r1, r3
+; V7A-T-NEXT:    pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_a4_commutative:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r6, lr}
+; V6M-NEXT:    push {r4, r5, r6, lr}
+; V6M-NEXT:    mov r5, r1
+; V6M-NEXT:    mov r4, r0
+; V6M-NEXT:    movs r0, #1
+; V6M-NEXT:    movs r6, #0
+; V6M-NEXT:    mov r1, r6
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    subs r0, r0, #1
+; V6M-NEXT:    sbcs r1, r6
+; V6M-NEXT:    ands r1, r5
+; V6M-NEXT:    ands r0, r4
+; V6M-NEXT:    pop {r4, r5, r6, pc}
+  %onebit = shl i64 1, %numlowbits
+  %mask = add nsw i64 %onebit, -1
+  %masked = and i64 %val, %mask ; swapped order
+  ret i64 %masked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Pattern b. 32-bit
+; ---------------------------------------------------------------------------- ;
+
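+; Note (illustrative only): pattern b is pattern a in disguise, since
+; ~(-1 << nbits) == (1 << nbits) - 1; e.g. for nbits = 4, ~0xfffffff0 and
+; (1 << 4) - 1 are both 0x0000000f.
+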
+define i32 @bzhi32_b0(i32 %val, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_b0:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    lsl.w r1, r2, r1
+; CHECK-NEXT:    bics r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi32_b0:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    mvn r2, #0
+; V7A-NEXT:    bic r0, r0, r2, lsl r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi32_b0:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    mov.w r2, #-1
+; V7A-T-NEXT:    lsl.w r1, r2, r1
+; V7A-T-NEXT:    bics r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi32_b0:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r2, #0
+; V6M-NEXT:    mvns r2, r2
+; V6M-NEXT:    lsls r2, r1
+; V6M-NEXT:    bics r0, r2
+; V6M-NEXT:    bx lr
+  %notmask = shl i32 -1, %numlowbits
+  %mask = xor i32 %notmask, -1
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_b1_indexzext(i32 %val, i8 zeroext %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_b1_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    lsl.w r1, r2, r1
+; CHECK-NEXT:    bics r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi32_b1_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    mvn r2, #0
+; V7A-NEXT:    bic r0, r0, r2, lsl r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi32_b1_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    mov.w r2, #-1
+; V7A-T-NEXT:    lsl.w r1, r2, r1
+; V7A-T-NEXT:    bics r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi32_b1_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r2, #0
+; V6M-NEXT:    mvns r2, r2
+; V6M-NEXT:    lsls r2, r1
+; V6M-NEXT:    bics r0, r2
+; V6M-NEXT:    bx lr
+  %conv = zext i8 %numlowbits to i32
+  %notmask = shl i32 -1, %conv
+  %mask = xor i32 %notmask, -1
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_b2_load(ptr %w, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_b2_load:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    lsl.w r1, r2, r1
+; CHECK-NEXT:    bics r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi32_b2_load:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ldr r0, [r0]
+; V7A-NEXT:    mvn r2, #0
+; V7A-NEXT:    bic r0, r0, r2, lsl r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi32_b2_load:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    ldr r0, [r0]
+; V7A-T-NEXT:    mov.w r2, #-1
+; V7A-T-NEXT:    lsl.w r1, r2, r1
+; V7A-T-NEXT:    bics r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi32_b2_load:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r2, #0
+; V6M-NEXT:    mvns r2, r2
+; V6M-NEXT:    lsls r2, r1
+; V6M-NEXT:    ldr r0, [r0]
+; V6M-NEXT:    bics r0, r2
+; V6M-NEXT:    bx lr
+  %val = load i32, ptr %w
+  %notmask = shl i32 -1, %numlowbits
+  %mask = xor i32 %notmask, -1
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_b3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_b3_load_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    lsl.w r1, r2, r1
+; CHECK-NEXT:    bics r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi32_b3_load_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ldr r0, [r0]
+; V7A-NEXT:    mvn r2, #0
+; V7A-NEXT:    bic r0, r0, r2, lsl r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi32_b3_load_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    ldr r0, [r0]
+; V7A-T-NEXT:    mov.w r2, #-1
+; V7A-T-NEXT:    lsl.w r1, r2, r1
+; V7A-T-NEXT:    bics r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi32_b3_load_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r2, #0
+; V6M-NEXT:    mvns r2, r2
+; V6M-NEXT:    lsls r2, r1
+; V6M-NEXT:    ldr r0, [r0]
+; V6M-NEXT:    bics r0, r2
+; V6M-NEXT:    bx lr
+  %val = load i32, ptr %w
+  %conv = zext i8 %numlowbits to i32
+  %notmask = shl i32 -1, %conv
+  %mask = xor i32 %notmask, -1
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_b4_commutative(i32 %val, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_b4_commutative:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    lsl.w r1, r2, r1
+; CHECK-NEXT:    bics r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi32_b4_commutative:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    mvn r2, #0
+; V7A-NEXT:    bic r0, r0, r2, lsl r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi32_b4_commutative:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    mov.w r2, #-1
+; V7A-T-NEXT:    lsl.w r1, r2, r1
+; V7A-T-NEXT:    bics r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi32_b4_commutative:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r2, #0
+; V6M-NEXT:    mvns r2, r2
+; V6M-NEXT:    lsls r2, r1
+; V6M-NEXT:    bics r0, r2
+; V6M-NEXT:    bx lr
+  %notmask = shl i32 -1, %numlowbits
+  %mask = xor i32 %notmask, -1
+  %masked = and i32 %val, %mask ; swapped order
+  ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @bzhi64_b0(i64 %val, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_b0:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov.w r3, #-1
+; CHECK-NEXT:    lsl.w r12, r3, r2
+; CHECK-NEXT:    subs r2, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl.w r12, #0
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl r3, r2
+; CHECK-NEXT:    bic.w r0, r0, r12
+; CHECK-NEXT:    bics r1, r3
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi64_b0:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    subs r12, r2, #32
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    lsl r2, r3, r2
+; V7A-NEXT:    lslpl r3, r3, r12
+; V7A-NEXT:    movwpl r2, #0
+; V7A-NEXT:    bic r1, r1, r3
+; V7A-NEXT:    bic r0, r0, r2
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi64_b0:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    mov.w r3, #-1
+; V7A-T-NEXT:    lsl.w r12, r3, r2
+; V7A-T-NEXT:    subs r2, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl.w r12, #0
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl r3, r2
+; V7A-T-NEXT:    bic.w r0, r0, r12
+; V7A-T-NEXT:    bics r1, r3
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi64_b0:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r7, lr}
+; V6M-NEXT:    push {r4, r5, r7, lr}
+; V6M-NEXT:    mov r4, r1
+; V6M-NEXT:    mov r5, r0
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    mov r1, r0
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    bics r5, r0
+; V6M-NEXT:    bics r4, r1
+; V6M-NEXT:    mov r0, r5
+; V6M-NEXT:    mov r1, r4
+; V6M-NEXT:    pop {r4, r5, r7, pc}
+  %notmask = shl i64 -1, %numlowbits
+  %mask = xor i64 %notmask, -1
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_b1_indexzext(i64 %val, i8 zeroext %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_b1_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov.w r3, #-1
+; CHECK-NEXT:    lsl.w r12, r3, r2
+; CHECK-NEXT:    subs r2, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl.w r12, #0
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl r3, r2
+; CHECK-NEXT:    bic.w r0, r0, r12
+; CHECK-NEXT:    bics r1, r3
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi64_b1_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    subs r12, r2, #32
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    lsl r2, r3, r2
+; V7A-NEXT:    lslpl r3, r3, r12
+; V7A-NEXT:    movwpl r2, #0
+; V7A-NEXT:    bic r1, r1, r3
+; V7A-NEXT:    bic r0, r0, r2
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi64_b1_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    mov.w r3, #-1
+; V7A-T-NEXT:    lsl.w r12, r3, r2
+; V7A-T-NEXT:    subs r2, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl.w r12, #0
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl r3, r2
+; V7A-T-NEXT:    bic.w r0, r0, r12
+; V7A-T-NEXT:    bics r1, r3
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi64_b1_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r7, lr}
+; V6M-NEXT:    push {r4, r5, r7, lr}
+; V6M-NEXT:    mov r4, r1
+; V6M-NEXT:    mov r5, r0
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    mov r1, r0
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    bics r5, r0
+; V6M-NEXT:    bics r4, r1
+; V6M-NEXT:    mov r0, r5
+; V6M-NEXT:    mov r1, r4
+; V6M-NEXT:    pop {r4, r5, r7, pc}
+  %conv = zext i8 %numlowbits to i64
+  %notmask = shl i64 -1, %conv
+  %mask = xor i64 %notmask, -1
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_b2_load(ptr %w, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_b2_load:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov.w r1, #-1
+; CHECK-NEXT:    subs.w r12, r2, #32
+; CHECK-NEXT:    lsl.w r3, r1, r2
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r3, #0
+; CHECK-NEXT:    ldrd r0, r2, [r0]
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r1, r1, r12
+; CHECK-NEXT:    bics r0, r3
+; CHECK-NEXT:    bic.w r1, r2, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi64_b2_load:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r4, lr}
+; V7A-NEXT:    push {r4, lr}
+; V7A-NEXT:    ldr r4, [r0]
+; V7A-NEXT:    mvn r1, #0
+; V7A-NEXT:    ldr r3, [r0, #4]
+; V7A-NEXT:    subs r0, r2, #32
+; V7A-NEXT:    lsl r2, r1, r2
+; V7A-NEXT:    lslpl r1, r1, r0
+; V7A-NEXT:    movwpl r2, #0
+; V7A-NEXT:    bic r1, r3, r1
+; V7A-NEXT:    bic r0, r4, r2
+; V7A-NEXT:    pop {r4, pc}
+;
+; V7A-T-LABEL: bzhi64_b2_load:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    mov.w r1, #-1
+; V7A-T-NEXT:    ldrd r0, r12, [r0]
+; V7A-T-NEXT:    lsl.w r3, r1, r2
+; V7A-T-NEXT:    subs r2, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r3, #0
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl r1, r2
+; V7A-T-NEXT:    bics r0, r3
+; V7A-T-NEXT:    bic.w r1, r12, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi64_b2_load:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, lr}
+; V6M-NEXT:    push {r4, lr}
+; V6M-NEXT:    mov r4, r0
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    mov r1, r0
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    ldm r4!, {r2, r3}
+; V6M-NEXT:    bics r2, r0
+; V6M-NEXT:    bics r3, r1
+; V6M-NEXT:    mov r0, r2
+; V6M-NEXT:    mov r1, r3
+; V6M-NEXT:    pop {r4, pc}
+  %val = load i64, ptr %w
+  %notmask = shl i64 -1, %numlowbits
+  %mask = xor i64 %notmask, -1
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_b3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_b3_load_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    subs.w r12, r1, #32
+; CHECK-NEXT:    lsl.w r3, r2, r1
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r3, #0
+; CHECK-NEXT:    ldrd r0, r1, [r0]
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r2, r2, r12
+; CHECK-NEXT:    bics r1, r2
+; CHECK-NEXT:    bics r0, r3
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi64_b3_load_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r4, r6, r11, lr}
+; V7A-NEXT:    push {r4, r6, r11, lr}
+; V7A-NEXT:    mvn r2, #0
+; V7A-NEXT:    ldr r6, [r0]
+; V7A-NEXT:    ldr r3, [r0, #4]
+; V7A-NEXT:    subs r0, r1, #32
+; V7A-NEXT:    lsl r4, r2, r1
+; V7A-NEXT:    lslpl r2, r2, r0
+; V7A-NEXT:    movwpl r4, #0
+; V7A-NEXT:    bic r1, r3, r2
+; V7A-NEXT:    bic r0, r6, r4
+; V7A-NEXT:    pop {r4, r6, r11, pc}
+;
+; V7A-T-LABEL: bzhi64_b3_load_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    mov.w r2, #-1
+; V7A-T-NEXT:    ldrd r0, r12, [r0]
+; V7A-T-NEXT:    lsl.w r3, r2, r1
+; V7A-T-NEXT:    subs r1, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r3, #0
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl r2, r1
+; V7A-T-NEXT:    bics r0, r3
+; V7A-T-NEXT:    bic.w r1, r12, r2
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi64_b3_load_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, lr}
+; V6M-NEXT:    push {r4, lr}
+; V6M-NEXT:    mov r2, r1
+; V6M-NEXT:    mov r4, r0
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    mov r1, r0
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    ldm r4!, {r2, r3}
+; V6M-NEXT:    bics r2, r0
+; V6M-NEXT:    bics r3, r1
+; V6M-NEXT:    mov r0, r2
+; V6M-NEXT:    mov r1, r3
+; V6M-NEXT:    pop {r4, pc}
+  %val = load i64, ptr %w
+  %conv = zext i8 %numlowbits to i64
+  %notmask = shl i64 -1, %conv
+  %mask = xor i64 %notmask, -1
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_b4_commutative(i64 %val, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_b4_commutative:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov.w r3, #-1
+; CHECK-NEXT:    lsl.w r12, r3, r2
+; CHECK-NEXT:    subs r2, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl.w r12, #0
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl r3, r2
+; CHECK-NEXT:    bic.w r0, r0, r12
+; CHECK-NEXT:    bics r1, r3
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi64_b4_commutative:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    subs r12, r2, #32
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    lsl r2, r3, r2
+; V7A-NEXT:    lslpl r3, r3, r12
+; V7A-NEXT:    movwpl r2, #0
+; V7A-NEXT:    bic r1, r1, r3
+; V7A-NEXT:    bic r0, r0, r2
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi64_b4_commutative:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    mov.w r3, #-1
+; V7A-T-NEXT:    lsl.w r12, r3, r2
+; V7A-T-NEXT:    subs r2, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl.w r12, #0
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl r3, r2
+; V7A-T-NEXT:    bic.w r0, r0, r12
+; V7A-T-NEXT:    bics r1, r3
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi64_b4_commutative:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r7, lr}
+; V6M-NEXT:    push {r4, r5, r7, lr}
+; V6M-NEXT:    mov r4, r1
+; V6M-NEXT:    mov r5, r0
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    mov r1, r0
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    bics r5, r0
+; V6M-NEXT:    bics r4, r1
+; V6M-NEXT:    mov r0, r5
+; V6M-NEXT:    mov r1, r4
+; V6M-NEXT:    pop {r4, r5, r7, pc}
+  %notmask = shl i64 -1, %numlowbits
+  %mask = xor i64 %notmask, -1
+  %masked = and i64 %val, %mask ; swapped order
+  ret i64 %masked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Pattern c. 32-bit
+; ---------------------------------------------------------------------------- ;
+
+define i32 @bzhi32_c0(i32 %val, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_c0:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    rsb.w r1, r1, #32
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    lsr.w r1, r2, r1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi32_c0:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    rsb r1, r1, #32
+; V7A-NEXT:    mvn r2, #0
+; V7A-NEXT:    and r0, r0, r2, lsr r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi32_c0:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    rsb.w r1, r1, #32
+; V7A-T-NEXT:    mov.w r2, #-1
+; V7A-T-NEXT:    lsr.w r1, r2, r1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi32_c0:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r2, #32
+; V6M-NEXT:    subs r1, r2, r1
+; V6M-NEXT:    movs r2, #0
+; V6M-NEXT:    mvns r2, r2
+; V6M-NEXT:    lsrs r2, r1
+; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    bx lr
+  %numhighbits = sub i32 32, %numlowbits
+  %mask = lshr i32 -1, %numhighbits
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_c1_indexzext(i32 %val, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_c1_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    rsb.w r1, r1, #32
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    uxtb r1, r1
+; CHECK-NEXT:    lsr.w r1, r2, r1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi32_c1_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    rsb r1, r1, #32
+; V7A-NEXT:    mvn r2, #0
+; V7A-NEXT:    uxtb r1, r1
+; V7A-NEXT:    and r0, r0, r2, lsr r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi32_c1_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    rsb.w r1, r1, #32
+; V7A-T-NEXT:    mov.w r2, #-1
+; V7A-T-NEXT:    uxtb r1, r1
+; V7A-T-NEXT:    lsr.w r1, r2, r1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi32_c1_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r2, #32
+; V6M-NEXT:    subs r1, r2, r1
+; V6M-NEXT:    uxtb r1, r1
+; V6M-NEXT:    movs r2, #0
+; V6M-NEXT:    mvns r2, r2
+; V6M-NEXT:    lsrs r2, r1
+; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    bx lr
+  %numhighbits = sub i8 32, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i32
+  %mask = lshr i32 -1, %sh_prom
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_c2_load(ptr %w, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_c2_load:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    rsb.w r1, r1, #32
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    lsr.w r1, r2, r1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi32_c2_load:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ldr r0, [r0]
+; V7A-NEXT:    rsb r1, r1, #32
+; V7A-NEXT:    mvn r2, #0
+; V7A-NEXT:    and r0, r0, r2, lsr r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi32_c2_load:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    ldr r0, [r0]
+; V7A-T-NEXT:    rsb.w r1, r1, #32
+; V7A-T-NEXT:    mov.w r2, #-1
+; V7A-T-NEXT:    lsr.w r1, r2, r1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi32_c2_load:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r2, #32
+; V6M-NEXT:    subs r1, r2, r1
+; V6M-NEXT:    movs r2, #0
+; V6M-NEXT:    mvns r2, r2
+; V6M-NEXT:    lsrs r2, r1
+; V6M-NEXT:    ldr r0, [r0]
+; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    bx lr
+  %val = load i32, ptr %w
+  %numhighbits = sub i32 32, %numlowbits
+  %mask = lshr i32 -1, %numhighbits
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_c3_load_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    rsb.w r1, r1, #32
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    uxtb r1, r1
+; CHECK-NEXT:    lsr.w r1, r2, r1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi32_c3_load_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    rsb r1, r1, #32
+; V7A-NEXT:    ldr r0, [r0]
+; V7A-NEXT:    mvn r2, #0
+; V7A-NEXT:    uxtb r1, r1
+; V7A-NEXT:    and r0, r0, r2, lsr r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi32_c3_load_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    rsb.w r1, r1, #32
+; V7A-T-NEXT:    ldr r0, [r0]
+; V7A-T-NEXT:    mov.w r2, #-1
+; V7A-T-NEXT:    uxtb r1, r1
+; V7A-T-NEXT:    lsr.w r1, r2, r1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi32_c3_load_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r2, #32
+; V6M-NEXT:    subs r1, r2, r1
+; V6M-NEXT:    uxtb r1, r1
+; V6M-NEXT:    movs r2, #0
+; V6M-NEXT:    mvns r2, r2
+; V6M-NEXT:    lsrs r2, r1
+; V6M-NEXT:    ldr r0, [r0]
+; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    bx lr
+  %val = load i32, ptr %w
+  %numhighbits = sub i8 32, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i32
+  %mask = lshr i32 -1, %sh_prom
+  %masked = and i32 %mask, %val
+  ret i32 %masked
+}
+
+define i32 @bzhi32_c4_commutative(i32 %val, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_c4_commutative:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    rsb.w r1, r1, #32
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    lsr.w r1, r2, r1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi32_c4_commutative:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    rsb r1, r1, #32
+; V7A-NEXT:    mvn r2, #0
+; V7A-NEXT:    and r0, r0, r2, lsr r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi32_c4_commutative:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    rsb.w r1, r1, #32
+; V7A-T-NEXT:    mov.w r2, #-1
+; V7A-T-NEXT:    lsr.w r1, r2, r1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi32_c4_commutative:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r2, #32
+; V6M-NEXT:    subs r1, r2, r1
+; V6M-NEXT:    movs r2, #0
+; V6M-NEXT:    mvns r2, r2
+; V6M-NEXT:    lsrs r2, r1
+; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    bx lr
+  %numhighbits = sub i32 32, %numlowbits
+  %mask = lshr i32 -1, %numhighbits
+  %masked = and i32 %val, %mask ; swapped order
+  ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @bzhi64_c0(i64 %val, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_c0:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    rsbs.w lr, r2, #32
+; CHECK-NEXT:    rsb.w r2, r2, #64
+; CHECK-NEXT:    mov.w r12, #-1
+; CHECK-NEXT:    mov.w r3, #-1
+; CHECK-NEXT:    lsr.w r2, r12, r2
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r3, r3, lr
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r2, #0
+; CHECK-NEXT:    ands r0, r3
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    pop {r7, pc}
+;
+; V7A-LABEL: bzhi64_c0:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r11, lr}
+; V7A-NEXT:    push {r11, lr}
+; V7A-NEXT:    rsbs lr, r2, #32
+; V7A-NEXT:    rsb r2, r2, #64
+; V7A-NEXT:    mvn r12, #0
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    lsr r2, r12, r2
+; V7A-NEXT:    lsrpl r3, r3, lr
+; V7A-NEXT:    movwpl r2, #0
+; V7A-NEXT:    and r0, r3, r0
+; V7A-NEXT:    and r1, r2, r1
+; V7A-NEXT:    pop {r11, pc}
+;
+; V7A-T-LABEL: bzhi64_c0:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r7, lr}
+; V7A-T-NEXT:    push {r7, lr}
+; V7A-T-NEXT:    rsbs.w lr, r2, #32
+; V7A-T-NEXT:    rsb.w r2, r2, #64
+; V7A-T-NEXT:    mov.w r12, #-1
+; V7A-T-NEXT:    mov.w r3, #-1
+; V7A-T-NEXT:    lsr.w r2, r12, r2
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r3, r3, lr
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r2, #0
+; V7A-T-NEXT:    ands r0, r3
+; V7A-T-NEXT:    ands r1, r2
+; V7A-T-NEXT:    pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_c0:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r7, lr}
+; V6M-NEXT:    push {r4, r5, r7, lr}
+; V6M-NEXT:    mov r4, r1
+; V6M-NEXT:    mov r5, r0
+; V6M-NEXT:    movs r0, #64
+; V6M-NEXT:    subs r2, r0, r2
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    mov r1, r0
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    ands r0, r5
+; V6M-NEXT:    ands r1, r4
+; V6M-NEXT:    pop {r4, r5, r7, pc}
+  %numhighbits = sub i64 64, %numlowbits
+  %mask = lshr i64 -1, %numhighbits
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_c1_indexzext(i64 %val, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_c1_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    rsb.w r2, r2, #64
+; CHECK-NEXT:    mov.w r3, #-1
+; CHECK-NEXT:    uxtb r2, r2
+; CHECK-NEXT:    subs.w r12, r2, #32
+; CHECK-NEXT:    lsr.w r2, r3, r2
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r3, r3, r12
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r2, #0
+; CHECK-NEXT:    ands r0, r3
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi64_c1_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r11, lr}
+; V7A-NEXT:    push {r11, lr}
+; V7A-NEXT:    rsb lr, r2, #64
+; V7A-NEXT:    mvn r2, #31
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    uxtb r12, lr
+; V7A-NEXT:    uxtab r2, r2, lr
+; V7A-NEXT:    lsr r12, r3, r12
+; V7A-NEXT:    cmp r2, #0
+; V7A-NEXT:    movwpl r12, #0
+; V7A-NEXT:    lsrpl r3, r3, r2
+; V7A-NEXT:    and r1, r12, r1
+; V7A-NEXT:    and r0, r3, r0
+; V7A-NEXT:    pop {r11, pc}
+;
+; V7A-T-LABEL: bzhi64_c1_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r7, lr}
+; V7A-T-NEXT:    push {r7, lr}
+; V7A-T-NEXT:    rsb.w lr, r2, #64
+; V7A-T-NEXT:    mvn r2, #31
+; V7A-T-NEXT:    mov.w r3, #-1
+; V7A-T-NEXT:    uxtb.w r12, lr
+; V7A-T-NEXT:    uxtab r2, r2, lr
+; V7A-T-NEXT:    lsr.w r12, r3, r12
+; V7A-T-NEXT:    cmp r2, #0
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl.w r12, #0
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl r3, r2
+; V7A-T-NEXT:    and.w r1, r1, r12
+; V7A-T-NEXT:    ands r0, r3
+; V7A-T-NEXT:    pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_c1_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r7, lr}
+; V6M-NEXT:    push {r4, r5, r7, lr}
+; V6M-NEXT:    mov r4, r1
+; V6M-NEXT:    mov r5, r0
+; V6M-NEXT:    movs r0, #64
+; V6M-NEXT:    subs r0, r0, r2
+; V6M-NEXT:    uxtb r2, r0
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    mov r1, r0
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    ands r0, r5
+; V6M-NEXT:    ands r1, r4
+; V6M-NEXT:    pop {r4, r5, r7, pc}
+  %numhighbits = sub i8 64, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i64
+  %mask = lshr i64 -1, %sh_prom
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_c2_load(ptr %w, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_c2_load:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    rsbs.w r1, r2, #32
+; CHECK-NEXT:    mov.w r3, #-1
+; CHECK-NEXT:    rsb.w r2, r2, #64
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl r3, r1
+; CHECK-NEXT:    ldrd r0, r1, [r0]
+; CHECK-NEXT:    mov.w r12, #-1
+; CHECK-NEXT:    lsr.w r2, r12, r2
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r2, #0
+; CHECK-NEXT:    ands r0, r3
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi64_c2_load:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r5, lr}
+; V7A-NEXT:    push {r5, lr}
+; V7A-NEXT:    rsbs r1, r2, #32
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    mvn r12, #0
+; V7A-NEXT:    ldm r0, {r0, r5}
+; V7A-NEXT:    lsrpl r3, r3, r1
+; V7A-NEXT:    rsb r1, r2, #64
+; V7A-NEXT:    and r0, r3, r0
+; V7A-NEXT:    lsr r1, r12, r1
+; V7A-NEXT:    movwpl r1, #0
+; V7A-NEXT:    and r1, r1, r5
+; V7A-NEXT:    pop {r5, pc}
+;
+; V7A-T-LABEL: bzhi64_c2_load:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r7, lr}
+; V7A-T-NEXT:    push {r7, lr}
+; V7A-T-NEXT:    rsbs.w r1, r2, #32
+; V7A-T-NEXT:    mov.w r3, #-1
+; V7A-T-NEXT:    ldrd r0, lr, [r0]
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl r3, r1
+; V7A-T-NEXT:    rsb.w r1, r2, #64
+; V7A-T-NEXT:    mov.w r12, #-1
+; V7A-T-NEXT:    and.w r0, r0, r3
+; V7A-T-NEXT:    lsr.w r1, r12, r1
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r1, #0
+; V7A-T-NEXT:    and.w r1, r1, lr
+; V7A-T-NEXT:    pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_c2_load:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, lr}
+; V6M-NEXT:    push {r4, lr}
+; V6M-NEXT:    mov r4, r0
+; V6M-NEXT:    movs r0, #64
+; V6M-NEXT:    subs r2, r0, r2
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    mov r1, r0
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    ldm r4!, {r2, r3}
+; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    ands r1, r3
+; V6M-NEXT:    pop {r4, pc}
+  %val = load i64, ptr %w
+  %numhighbits = sub i64 64, %numlowbits
+  %mask = lshr i64 -1, %numhighbits
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_c3_load_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    rsb.w r1, r1, #64
+; CHECK-NEXT:    mov.w r3, #-1
+; CHECK-NEXT:    uxtb r1, r1
+; CHECK-NEXT:    subs.w r2, r1, #32
+; CHECK-NEXT:    lsr.w r1, r3, r1
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl r3, r2
+; CHECK-NEXT:    ldrd r0, r2, [r0]
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    ands r0, r3
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi64_c3_load_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r4, r6, r11, lr}
+; V7A-NEXT:    push {r4, r6, r11, lr}
+; V7A-NEXT:    rsb r1, r1, #64
+; V7A-NEXT:    mvn r4, #31
+; V7A-NEXT:    mvn r2, #0
+; V7A-NEXT:    ldr r6, [r0]
+; V7A-NEXT:    ldr r3, [r0, #4]
+; V7A-NEXT:    uxtb r0, r1
+; V7A-NEXT:    uxtab r4, r4, r1
+; V7A-NEXT:    lsr r0, r2, r0
+; V7A-NEXT:    cmp r4, #0
+; V7A-NEXT:    movwpl r0, #0
+; V7A-NEXT:    and r1, r0, r3
+; V7A-NEXT:    lsrpl r2, r2, r4
+; V7A-NEXT:    and r0, r2, r6
+; V7A-NEXT:    pop {r4, r6, r11, pc}
+;
+; V7A-T-LABEL: bzhi64_c3_load_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r7, lr}
+; V7A-T-NEXT:    push {r7, lr}
+; V7A-T-NEXT:    rsb.w r1, r1, #64
+; V7A-T-NEXT:    mvn r3, #31
+; V7A-T-NEXT:    ldrd r12, lr, [r0]
+; V7A-T-NEXT:    mov.w r2, #-1
+; V7A-T-NEXT:    uxtb r0, r1
+; V7A-T-NEXT:    uxtab r3, r3, r1
+; V7A-T-NEXT:    lsr.w r0, r2, r0
+; V7A-T-NEXT:    cmp r3, #0
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r0, #0
+; V7A-T-NEXT:    and.w r1, r0, lr
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl r2, r3
+; V7A-T-NEXT:    and.w r0, r2, r12
+; V7A-T-NEXT:    pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_c3_load_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, lr}
+; V6M-NEXT:    push {r4, lr}
+; V6M-NEXT:    mov r4, r0
+; V6M-NEXT:    movs r0, #64
+; V6M-NEXT:    subs r0, r0, r1
+; V6M-NEXT:    uxtb r2, r0
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    mov r1, r0
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    ldm r4!, {r2, r3}
+; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    ands r1, r3
+; V6M-NEXT:    pop {r4, pc}
+  %val = load i64, ptr %w
+  %numhighbits = sub i8 64, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i64
+  %mask = lshr i64 -1, %sh_prom
+  %masked = and i64 %mask, %val
+  ret i64 %masked
+}
+
+define i64 @bzhi64_c4_commutative(i64 %val, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_c4_commutative:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    rsbs.w lr, r2, #32
+; CHECK-NEXT:    rsb.w r2, r2, #64
+; CHECK-NEXT:    mov.w r12, #-1
+; CHECK-NEXT:    mov.w r3, #-1
+; CHECK-NEXT:    lsr.w r2, r12, r2
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r3, r3, lr
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r2, #0
+; CHECK-NEXT:    ands r0, r3
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    pop {r7, pc}
+;
+; V7A-LABEL: bzhi64_c4_commutative:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r11, lr}
+; V7A-NEXT:    push {r11, lr}
+; V7A-NEXT:    rsbs lr, r2, #32
+; V7A-NEXT:    rsb r2, r2, #64
+; V7A-NEXT:    mvn r12, #0
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    lsr r2, r12, r2
+; V7A-NEXT:    lsrpl r3, r3, lr
+; V7A-NEXT:    movwpl r2, #0
+; V7A-NEXT:    and r0, r0, r3
+; V7A-NEXT:    and r1, r1, r2
+; V7A-NEXT:    pop {r11, pc}
+;
+; V7A-T-LABEL: bzhi64_c4_commutative:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r7, lr}
+; V7A-T-NEXT:    push {r7, lr}
+; V7A-T-NEXT:    rsbs.w lr, r2, #32
+; V7A-T-NEXT:    rsb.w r2, r2, #64
+; V7A-T-NEXT:    mov.w r12, #-1
+; V7A-T-NEXT:    mov.w r3, #-1
+; V7A-T-NEXT:    lsr.w r2, r12, r2
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r3, r3, lr
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r2, #0
+; V7A-T-NEXT:    ands r0, r3
+; V7A-T-NEXT:    ands r1, r2
+; V7A-T-NEXT:    pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_c4_commutative:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r7, lr}
+; V6M-NEXT:    push {r4, r5, r7, lr}
+; V6M-NEXT:    mov r4, r1
+; V6M-NEXT:    mov r5, r0
+; V6M-NEXT:    movs r0, #64
+; V6M-NEXT:    subs r2, r0, r2
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    mov r1, r0
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    ands r0, r5
+; V6M-NEXT:    ands r1, r4
+; V6M-NEXT:    pop {r4, r5, r7, pc}
+  %numhighbits = sub i64 64, %numlowbits
+  %mask = lshr i64 -1, %numhighbits
+  %masked = and i64 %val, %mask ; swapped order
+  ret i64 %masked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Pattern d. 32-bit.
+; ---------------------------------------------------------------------------- ;
+
+define i32 @bzhi32_d0(i32 %val, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_d0:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    rsb.w r1, r1, #32
+; CHECK-NEXT:    lsls r0, r1
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi32_d0:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    rsb r1, r1, #32
+; V7A-NEXT:    lsl r0, r0, r1
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi32_d0:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    rsb.w r1, r1, #32
+; V7A-T-NEXT:    lsls r0, r1
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi32_d0:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r2, #32
+; V6M-NEXT:    subs r1, r2, r1
+; V6M-NEXT:    lsls r0, r1
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    bx lr
+  %numhighbits = sub i32 32, %numlowbits
+  %highbitscleared = shl i32 %val, %numhighbits
+  %masked = lshr i32 %highbitscleared, %numhighbits
+  ret i32 %masked
+}
+
+define i32 @bzhi32_d1_indexzext(i32 %val, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_d1_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    rsb.w r1, r1, #32
+; CHECK-NEXT:    uxtb r1, r1
+; CHECK-NEXT:    lsls r0, r1
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi32_d1_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    rsb r1, r1, #32
+; V7A-NEXT:    uxtb r1, r1
+; V7A-NEXT:    lsl r0, r0, r1
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi32_d1_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    rsb.w r1, r1, #32
+; V7A-T-NEXT:    uxtb r1, r1
+; V7A-T-NEXT:    lsls r0, r1
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi32_d1_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r2, #32
+; V6M-NEXT:    subs r1, r2, r1
+; V6M-NEXT:    uxtb r1, r1
+; V6M-NEXT:    lsls r0, r1
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    bx lr
+  %numhighbits = sub i8 32, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i32
+  %highbitscleared = shl i32 %val, %sh_prom
+  %masked = lshr i32 %highbitscleared, %sh_prom
+  ret i32 %masked
+}
+
+define i32 @bzhi32_d2_load(ptr %w, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_d2_load:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    rsb.w r1, r1, #32
+; CHECK-NEXT:    lsls r0, r1
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi32_d2_load:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ldr r0, [r0]
+; V7A-NEXT:    rsb r1, r1, #32
+; V7A-NEXT:    lsl r0, r0, r1
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi32_d2_load:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    ldr r0, [r0]
+; V7A-T-NEXT:    rsb.w r1, r1, #32
+; V7A-T-NEXT:    lsls r0, r1
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi32_d2_load:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r2, #32
+; V6M-NEXT:    subs r1, r2, r1
+; V6M-NEXT:    ldr r0, [r0]
+; V6M-NEXT:    lsls r0, r1
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    bx lr
+  %val = load i32, ptr %w
+  %numhighbits = sub i32 32, %numlowbits
+  %highbitscleared = shl i32 %val, %numhighbits
+  %masked = lshr i32 %highbitscleared, %numhighbits
+  ret i32 %masked
+}
+
+define i32 @bzhi32_d3_load_indexzext(ptr %w, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi32_d3_load_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    rsb.w r1, r1, #32
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    uxtb r1, r1
+; CHECK-NEXT:    lsls r0, r1
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi32_d3_load_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    rsb r1, r1, #32
+; V7A-NEXT:    ldr r0, [r0]
+; V7A-NEXT:    uxtb r1, r1
+; V7A-NEXT:    lsl r0, r0, r1
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi32_d3_load_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    rsb.w r1, r1, #32
+; V7A-T-NEXT:    ldr r0, [r0]
+; V7A-T-NEXT:    uxtb r1, r1
+; V7A-T-NEXT:    lsls r0, r1
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi32_d3_load_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r2, #32
+; V6M-NEXT:    subs r1, r2, r1
+; V6M-NEXT:    uxtb r1, r1
+; V6M-NEXT:    ldr r0, [r0]
+; V6M-NEXT:    lsls r0, r1
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    bx lr
+  %val = load i32, ptr %w
+  %numhighbits = sub i8 32, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i32
+  %highbitscleared = shl i32 %val, %sh_prom
+  %masked = lshr i32 %highbitscleared, %sh_prom
+  ret i32 %masked
+}
+
+; 64-bit.
+
+define i64 @bzhi64_d0(i64 %val, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_d0:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    rsb.w r3, r2, #64
+; CHECK-NEXT:    rsbs.w r2, r2, #32
+; CHECK-NEXT:    rsb.w lr, r3, #32
+; CHECK-NEXT:    lsl.w r12, r1, r3
+; CHECK-NEXT:    lsr.w r1, r0, lr
+; CHECK-NEXT:    orr.w r1, r1, r12
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r1, r0, r2
+; CHECK-NEXT:    lsl.w r0, r0, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r0, #0
+; CHECK-NEXT:    lsl.w r12, r1, lr
+; CHECK-NEXT:    lsr.w r0, r0, r3
+; CHECK-NEXT:    orr.w r0, r0, r12
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r1, r2
+; CHECK-NEXT:    lsr.w r1, r1, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    pop {r7, pc}
+;
+; V7A-LABEL: bzhi64_d0:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r11, lr}
+; V7A-NEXT:    push {r11, lr}
+; V7A-NEXT:    rsb lr, r2, #64
+; V7A-NEXT:    rsbs r2, r2, #32
+; V7A-NEXT:    rsb r12, lr, #32
+; V7A-NEXT:    lsr r3, r0, r12
+; V7A-NEXT:    orr r1, r3, r1, lsl lr
+; V7A-NEXT:    lslpl r1, r0, r2
+; V7A-NEXT:    lsl r0, r0, lr
+; V7A-NEXT:    movwpl r0, #0
+; V7A-NEXT:    lsr r0, r0, lr
+; V7A-NEXT:    orr r0, r0, r1, lsl r12
+; V7A-NEXT:    lsrpl r0, r1, r2
+; V7A-NEXT:    lsr r1, r1, lr
+; V7A-NEXT:    movwpl r1, #0
+; V7A-NEXT:    pop {r11, pc}
+;
+; V7A-T-LABEL: bzhi64_d0:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r7, lr}
+; V7A-T-NEXT:    push {r7, lr}
+; V7A-T-NEXT:    rsb.w r3, r2, #64
+; V7A-T-NEXT:    rsbs.w r2, r2, #32
+; V7A-T-NEXT:    rsb.w lr, r3, #32
+; V7A-T-NEXT:    lsl.w r12, r1, r3
+; V7A-T-NEXT:    lsr.w r1, r0, lr
+; V7A-T-NEXT:    orr.w r1, r1, r12
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl.w r1, r0, r2
+; V7A-T-NEXT:    lsl.w r0, r0, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r0, #0
+; V7A-T-NEXT:    lsl.w r12, r1, lr
+; V7A-T-NEXT:    lsr.w r0, r0, r3
+; V7A-T-NEXT:    orr.w r0, r0, r12
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r1, r2
+; V7A-T-NEXT:    lsr.w r1, r1, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r1, #0
+; V7A-T-NEXT:    pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_d0:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, lr}
+; V6M-NEXT:    push {r4, lr}
+; V6M-NEXT:    movs r3, #64
+; V6M-NEXT:    subs r4, r3, r2
+; V6M-NEXT:    mov r2, r4
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    mov r2, r4
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    pop {r4, pc}
+  %numhighbits = sub i64 64, %numlowbits
+  %highbitscleared = shl i64 %val, %numhighbits
+  %masked = lshr i64 %highbitscleared, %numhighbits
+  ret i64 %masked
+}
+
+define i64 @bzhi64_d1_indexzext(i64 %val, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_d1_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    rsb.w r2, r2, #64
+; CHECK-NEXT:    uxtb r2, r2
+; CHECK-NEXT:    rsb.w r3, r2, #32
+; CHECK-NEXT:    lsl.w r12, r1, r2
+; CHECK-NEXT:    lsr.w r1, r0, r3
+; CHECK-NEXT:    orr.w r1, r1, r12
+; CHECK-NEXT:    subs.w r12, r2, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r1, r0, r12
+; CHECK-NEXT:    lsl.w r0, r0, r2
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r0, #0
+; CHECK-NEXT:    lsl.w r3, r1, r3
+; CHECK-NEXT:    lsr.w r0, r0, r2
+; CHECK-NEXT:    orr.w r0, r0, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r1, r12
+; CHECK-NEXT:    lsr.w r1, r1, r2
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi64_d1_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r11, lr}
+; V7A-NEXT:    push {r11, lr}
+; V7A-NEXT:    rsb lr, r2, #64
+; V7A-NEXT:    uxtb r3, lr
+; V7A-NEXT:    rsb r12, r3, #32
+; V7A-NEXT:    lsr r2, r0, r12
+; V7A-NEXT:    orr r1, r2, r1, lsl r3
+; V7A-NEXT:    mvn r2, #31
+; V7A-NEXT:    uxtab r2, r2, lr
+; V7A-NEXT:    cmp r2, #0
+; V7A-NEXT:    lslpl r1, r0, r2
+; V7A-NEXT:    lsl r0, r0, r3
+; V7A-NEXT:    movwpl r0, #0
+; V7A-NEXT:    lsr r0, r0, r3
+; V7A-NEXT:    orr r0, r0, r1, lsl r12
+; V7A-NEXT:    lsrpl r0, r1, r2
+; V7A-NEXT:    lsr r1, r1, r3
+; V7A-NEXT:    movwpl r1, #0
+; V7A-NEXT:    pop {r11, pc}
+;
+; V7A-T-LABEL: bzhi64_d1_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r4, lr}
+; V7A-T-NEXT:    push {r4, lr}
+; V7A-T-NEXT:    rsb.w r4, r2, #64
+; V7A-T-NEXT:    mvn r2, #31
+; V7A-T-NEXT:    uxtb r3, r4
+; V7A-T-NEXT:    rsb.w lr, r3, #32
+; V7A-T-NEXT:    lsl.w r12, r1, r3
+; V7A-T-NEXT:    uxtab r2, r2, r4
+; V7A-T-NEXT:    lsr.w r1, r0, lr
+; V7A-T-NEXT:    cmp r2, #0
+; V7A-T-NEXT:    orr.w r1, r1, r12
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl.w r1, r0, r2
+; V7A-T-NEXT:    lsl.w r0, r0, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r0, #0
+; V7A-T-NEXT:    lsl.w r4, r1, lr
+; V7A-T-NEXT:    lsr.w r0, r0, r3
+; V7A-T-NEXT:    orr.w r0, r0, r4
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r1, r2
+; V7A-T-NEXT:    lsr.w r1, r1, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r1, #0
+; V7A-T-NEXT:    pop {r4, pc}
+;
+; V6M-LABEL: bzhi64_d1_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, lr}
+; V6M-NEXT:    push {r4, lr}
+; V6M-NEXT:    movs r3, #64
+; V6M-NEXT:    subs r2, r3, r2
+; V6M-NEXT:    uxtb r4, r2
+; V6M-NEXT:    mov r2, r4
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    mov r2, r4
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    pop {r4, pc}
+  %numhighbits = sub i8 64, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i64
+  %highbitscleared = shl i64 %val, %sh_prom
+  %masked = lshr i64 %highbitscleared, %sh_prom
+  ret i64 %masked
+}
+
+define i64 @bzhi64_d2_load(ptr %w, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_d2_load:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    rsb.w r1, r2, #64
+; CHECK-NEXT:    ldrd r0, r3, [r0]
+; CHECK-NEXT:    rsb.w lr, r1, #32
+; CHECK-NEXT:    rsbs.w r2, r2, #32
+; CHECK-NEXT:    lsl.w r12, r3, r1
+; CHECK-NEXT:    lsr.w r3, r0, lr
+; CHECK-NEXT:    orr.w r3, r3, r12
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r3, r0, r2
+; CHECK-NEXT:    lsl.w r0, r0, r1
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r0, #0
+; CHECK-NEXT:    lsl.w r12, r3, lr
+; CHECK-NEXT:    lsr.w r0, r0, r1
+; CHECK-NEXT:    lsr.w r1, r3, r1
+; CHECK-NEXT:    orr.w r0, r0, r12
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r3, r2
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    pop {r7, pc}
+;
+; V7A-LABEL: bzhi64_d2_load:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r5, r7, r11, lr}
+; V7A-NEXT:    push {r5, r7, r11, lr}
+; V7A-NEXT:    rsb r3, r2, #64
+; V7A-NEXT:    ldm r0, {r0, r7}
+; V7A-NEXT:    rsb r1, r3, #32
+; V7A-NEXT:    rsbs r2, r2, #32
+; V7A-NEXT:    lsr r5, r0, r1
+; V7A-NEXT:    orr r7, r5, r7, lsl r3
+; V7A-NEXT:    lslpl r7, r0, r2
+; V7A-NEXT:    lsl r0, r0, r3
+; V7A-NEXT:    movwpl r0, #0
+; V7A-NEXT:    lsr r0, r0, r3
+; V7A-NEXT:    orr r0, r0, r7, lsl r1
+; V7A-NEXT:    lsr r1, r7, r3
+; V7A-NEXT:    lsrpl r0, r7, r2
+; V7A-NEXT:    movwpl r1, #0
+; V7A-NEXT:    pop {r5, r7, r11, pc}
+;
+; V7A-T-LABEL: bzhi64_d2_load:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r7, lr}
+; V7A-T-NEXT:    push {r7, lr}
+; V7A-T-NEXT:    rsb.w r3, r2, #64
+; V7A-T-NEXT:    ldrd r0, r1, [r0]
+; V7A-T-NEXT:    rsb.w lr, r3, #32
+; V7A-T-NEXT:    rsbs.w r2, r2, #32
+; V7A-T-NEXT:    lsl.w r12, r1, r3
+; V7A-T-NEXT:    lsr.w r1, r0, lr
+; V7A-T-NEXT:    orr.w r1, r1, r12
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl.w r1, r0, r2
+; V7A-T-NEXT:    lsl.w r0, r0, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r0, #0
+; V7A-T-NEXT:    lsl.w r12, r1, lr
+; V7A-T-NEXT:    lsr.w r0, r0, r3
+; V7A-T-NEXT:    orr.w r0, r0, r12
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r1, r2
+; V7A-T-NEXT:    lsr.w r1, r1, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r1, #0
+; V7A-T-NEXT:    pop {r7, pc}
+;
+; V6M-LABEL: bzhi64_d2_load:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, lr}
+; V6M-NEXT:    push {r4, lr}
+; V6M-NEXT:    movs r1, #64
+; V6M-NEXT:    subs r4, r1, r2
+; V6M-NEXT:    ldr r2, [r0]
+; V6M-NEXT:    ldr r1, [r0, #4]
+; V6M-NEXT:    mov r0, r2
+; V6M-NEXT:    mov r2, r4
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    mov r2, r4
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    pop {r4, pc}
+  %val = load i64, ptr %w
+  %numhighbits = sub i64 64, %numlowbits
+  %highbitscleared = shl i64 %val, %numhighbits
+  %masked = lshr i64 %highbitscleared, %numhighbits
+  ret i64 %masked
+}
+
+define i64 @bzhi64_d3_load_indexzext(ptr %w, i8 %numlowbits) nounwind {
+; CHECK-LABEL: bzhi64_d3_load_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    rsb.w r1, r1, #64
+; CHECK-NEXT:    ldrd r0, r2, [r0]
+; CHECK-NEXT:    uxtb r1, r1
+; CHECK-NEXT:    rsb.w r3, r1, #32
+; CHECK-NEXT:    lsl.w r12, r2, r1
+; CHECK-NEXT:    lsr.w r2, r0, r3
+; CHECK-NEXT:    orr.w r2, r2, r12
+; CHECK-NEXT:    subs.w r12, r1, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r2, r0, r12
+; CHECK-NEXT:    lsl.w r0, r0, r1
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r0, #0
+; CHECK-NEXT:    lsl.w r3, r2, r3
+; CHECK-NEXT:    lsr.w r0, r0, r1
+; CHECK-NEXT:    lsr.w r1, r2, r1
+; CHECK-NEXT:    orr.w r0, r0, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r2, r12
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi64_d3_load_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r5, r7, r11, lr}
+; V7A-NEXT:    push {r5, r7, r11, lr}
+; V7A-NEXT:    rsb r1, r1, #64
+; V7A-NEXT:    ldm r0, {r0, r7}
+; V7A-NEXT:    uxtb r2, r1
+; V7A-NEXT:    rsb r3, r2, #32
+; V7A-NEXT:    lsr r5, r0, r3
+; V7A-NEXT:    orr r7, r5, r7, lsl r2
+; V7A-NEXT:    mvn r5, #31
+; V7A-NEXT:    uxtab r1, r5, r1
+; V7A-NEXT:    cmp r1, #0
+; V7A-NEXT:    lslpl r7, r0, r1
+; V7A-NEXT:    lsl r0, r0, r2
+; V7A-NEXT:    movwpl r0, #0
+; V7A-NEXT:    lsr r0, r0, r2
+; V7A-NEXT:    orr r0, r0, r7, lsl r3
+; V7A-NEXT:    lsrpl r0, r7, r1
+; V7A-NEXT:    lsr r1, r7, r2
+; V7A-NEXT:    movwpl r1, #0
+; V7A-NEXT:    pop {r5, r7, r11, pc}
+;
+; V7A-T-LABEL: bzhi64_d3_load_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r4, lr}
+; V7A-T-NEXT:    push {r4, lr}
+; V7A-T-NEXT:    rsb.w r4, r1, #64
+; V7A-T-NEXT:    ldrd r0, r2, [r0]
+; V7A-T-NEXT:    mvn r1, #31
+; V7A-T-NEXT:    uxtb r3, r4
+; V7A-T-NEXT:    rsb.w lr, r3, #32
+; V7A-T-NEXT:    lsl.w r12, r2, r3
+; V7A-T-NEXT:    uxtab r1, r1, r4
+; V7A-T-NEXT:    lsr.w r2, r0, lr
+; V7A-T-NEXT:    cmp r1, #0
+; V7A-T-NEXT:    orr.w r2, r2, r12
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl.w r2, r0, r1
+; V7A-T-NEXT:    lsl.w r0, r0, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r0, #0
+; V7A-T-NEXT:    lsl.w r4, r2, lr
+; V7A-T-NEXT:    lsr.w r0, r0, r3
+; V7A-T-NEXT:    orr.w r0, r0, r4
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r2, r1
+; V7A-T-NEXT:    lsr.w r1, r2, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r1, #0
+; V7A-T-NEXT:    pop {r4, pc}
+;
+; V6M-LABEL: bzhi64_d3_load_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, lr}
+; V6M-NEXT:    push {r4, lr}
+; V6M-NEXT:    movs r2, #64
+; V6M-NEXT:    subs r1, r2, r1
+; V6M-NEXT:    uxtb r4, r1
+; V6M-NEXT:    ldr r2, [r0]
+; V6M-NEXT:    ldr r1, [r0, #4]
+; V6M-NEXT:    mov r0, r2
+; V6M-NEXT:    mov r2, r4
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    mov r2, r4
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    pop {r4, pc}
+  %val = load i64, ptr %w
+  %numhighbits = sub i8 64, %numlowbits
+  %sh_prom = zext i8 %numhighbits to i64
+  %highbitscleared = shl i64 %val, %sh_prom
+  %masked = lshr i64 %highbitscleared, %sh_prom
+  ret i64 %masked
+}
+
+; ---------------------------------------------------------------------------- ;
+; Constant mask
+; ---------------------------------------------------------------------------- ;
+
+; 32-bit
+
+define i32 @bzhi32_constant_mask32(i32 %val) nounwind {
+; CHECK-LABEL: bzhi32_constant_mask32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    bic r0, r0, #-2147483648
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi32_constant_mask32:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    bic r0, r0, #-2147483648
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi32_constant_mask32:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    bic r0, r0, #-2147483648
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi32_constant_mask32:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r1, #1
+; V6M-NEXT:    lsls r1, r1, #31
+; V6M-NEXT:    bics r0, r1
+; V6M-NEXT:    bx lr
+  %masked = and i32 %val, 2147483647
+  ret i32 %masked
+}
+
+define i32 @bzhi32_constant_mask32_load(ptr %val) nounwind {
+; CHECK-LABEL: bzhi32_constant_mask32_load:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    bic r0, r0, #-2147483648
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi32_constant_mask32_load:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ldr r0, [r0]
+; V7A-NEXT:    bic r0, r0, #-2147483648
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi32_constant_mask32_load:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    ldr r0, [r0]
+; V7A-T-NEXT:    bic r0, r0, #-2147483648
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi32_constant_mask32_load:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r1, #1
+; V6M-NEXT:    lsls r1, r1, #31
+; V6M-NEXT:    ldr r0, [r0]
+; V6M-NEXT:    bics r0, r1
+; V6M-NEXT:    bx lr
+  %val1 = load i32, ptr %val
+  %masked = and i32 %val1, 2147483647
+  ret i32 %masked
+}
+
+define i32 @bzhi32_constant_mask16(i32 %val) nounwind {
+; CHECK-LABEL: bzhi32_constant_mask16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    bfc r0, #15, #17
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi32_constant_mask16:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    bfc r0, #15, #17
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi32_constant_mask16:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    bfc r0, #15, #17
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi32_constant_mask16:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    ldr r1, .LCPI41_0
+; V6M-NEXT:    ands r0, r1
+; V6M-NEXT:    bx lr
+; V6M-NEXT:    .p2align 2
+; V6M-NEXT:  @ %bb.1:
+; V6M-NEXT:  .LCPI41_0:
+; V6M-NEXT:    .long 32767 @ 0x7fff
+  %masked = and i32 %val, 32767
+  ret i32 %masked
+}
+
+define i32 @bzhi32_constant_mask16_load(ptr %val) nounwind {
+; CHECK-LABEL: bzhi32_constant_mask16_load:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    bfc r0, #15, #17
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi32_constant_mask16_load:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ldr r0, [r0]
+; V7A-NEXT:    bfc r0, #15, #17
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi32_constant_mask16_load:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    ldr r0, [r0]
+; V7A-T-NEXT:    bfc r0, #15, #17
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi32_constant_mask16_load:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    ldr r1, [r0]
+; V6M-NEXT:    ldr r0, .LCPI42_0
+; V6M-NEXT:    ands r0, r1
+; V6M-NEXT:    bx lr
+; V6M-NEXT:    .p2align 2
+; V6M-NEXT:  @ %bb.1:
+; V6M-NEXT:  .LCPI42_0:
+; V6M-NEXT:    .long 32767 @ 0x7fff
+  %val1 = load i32, ptr %val
+  %masked = and i32 %val1, 32767
+  ret i32 %masked
+}
+
+define i32 @bzhi32_constant_mask8(i32 %val) nounwind {
+; CHECK-LABEL: bzhi32_constant_mask8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    and r0, r0, #127
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi32_constant_mask8:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    and r0, r0, #127
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi32_constant_mask8:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    and r0, r0, #127
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi32_constant_mask8:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r1, #127
+; V6M-NEXT:    ands r0, r1
+; V6M-NEXT:    bx lr
+  %masked = and i32 %val, 127
+  ret i32 %masked
+}
+
+define i32 @bzhi32_constant_mask8_load(ptr %val) nounwind {
+; CHECK-LABEL: bzhi32_constant_mask8_load:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    and r0, r0, #127
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi32_constant_mask8_load:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ldr r0, [r0]
+; V7A-NEXT:    and r0, r0, #127
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi32_constant_mask8_load:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    ldr r0, [r0]
+; V7A-T-NEXT:    and r0, r0, #127
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi32_constant_mask8_load:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    ldr r1, [r0]
+; V6M-NEXT:    movs r0, #127
+; V6M-NEXT:    ands r0, r1
+; V6M-NEXT:    bx lr
+  %val1 = load i32, ptr %val
+  %masked = and i32 %val1, 127
+  ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @bzhi64_constant_mask64(i64 %val) nounwind {
+; CHECK-LABEL: bzhi64_constant_mask64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    bic r1, r1, #-1073741824
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi64_constant_mask64:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    bic r1, r1, #-1073741824
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi64_constant_mask64:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    bic r1, r1, #-1073741824
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi64_constant_mask64:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r2, #3
+; V6M-NEXT:    lsls r2, r2, #30
+; V6M-NEXT:    bics r1, r2
+; V6M-NEXT:    bx lr
+  %masked = and i64 %val, 4611686018427387903
+  ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask64_load(ptr %val) nounwind {
+; CHECK-LABEL: bzhi64_constant_mask64_load:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ldrd r0, r1, [r0]
+; CHECK-NEXT:    bic r1, r1, #-1073741824
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi64_constant_mask64_load:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ldrd r0, r1, [r0]
+; V7A-NEXT:    bic r1, r1, #-1073741824
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi64_constant_mask64_load:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    ldrd r0, r1, [r0]
+; V7A-T-NEXT:    bic r1, r1, #-1073741824
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi64_constant_mask64_load:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r1, #3
+; V6M-NEXT:    lsls r3, r1, #30
+; V6M-NEXT:    ldr r2, [r0]
+; V6M-NEXT:    ldr r1, [r0, #4]
+; V6M-NEXT:    bics r1, r3
+; V6M-NEXT:    mov r0, r2
+; V6M-NEXT:    bx lr
+  %val1 = load i64, ptr %val
+  %masked = and i64 %val1, 4611686018427387903
+  ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask32(i64 %val) nounwind {
+; CHECK-LABEL: bzhi64_constant_mask32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    bic r0, r0, #-2147483648
+; CHECK-NEXT:    movs r1, #0
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi64_constant_mask32:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    bic r0, r0, #-2147483648
+; V7A-NEXT:    mov r1, #0
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi64_constant_mask32:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    bic r0, r0, #-2147483648
+; V7A-T-NEXT:    movs r1, #0
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi64_constant_mask32:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r1, #1
+; V6M-NEXT:    lsls r1, r1, #31
+; V6M-NEXT:    bics r0, r1
+; V6M-NEXT:    movs r1, #0
+; V6M-NEXT:    bx lr
+  %masked = and i64 %val, 2147483647
+  ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask32_load(ptr %val) nounwind {
+; CHECK-LABEL: bzhi64_constant_mask32_load:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    movs r1, #0
+; CHECK-NEXT:    bic r0, r0, #-2147483648
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi64_constant_mask32_load:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ldr r0, [r0]
+; V7A-NEXT:    mov r1, #0
+; V7A-NEXT:    bic r0, r0, #-2147483648
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi64_constant_mask32_load:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    ldr r0, [r0]
+; V7A-T-NEXT:    movs r1, #0
+; V7A-T-NEXT:    bic r0, r0, #-2147483648
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi64_constant_mask32_load:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r1, #1
+; V6M-NEXT:    lsls r1, r1, #31
+; V6M-NEXT:    ldr r0, [r0]
+; V6M-NEXT:    bics r0, r1
+; V6M-NEXT:    movs r1, #0
+; V6M-NEXT:    bx lr
+  %val1 = load i64, ptr %val
+  %masked = and i64 %val1, 2147483647
+  ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask16(i64 %val) nounwind {
+; CHECK-LABEL: bzhi64_constant_mask16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    bfc r0, #15, #17
+; CHECK-NEXT:    movs r1, #0
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi64_constant_mask16:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    bfc r0, #15, #17
+; V7A-NEXT:    mov r1, #0
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi64_constant_mask16:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    bfc r0, #15, #17
+; V7A-T-NEXT:    movs r1, #0
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi64_constant_mask16:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    ldr r1, .LCPI49_0
+; V6M-NEXT:    ands r0, r1
+; V6M-NEXT:    movs r1, #0
+; V6M-NEXT:    bx lr
+; V6M-NEXT:    .p2align 2
+; V6M-NEXT:  @ %bb.1:
+; V6M-NEXT:  .LCPI49_0:
+; V6M-NEXT:    .long 32767 @ 0x7fff
+  %masked = and i64 %val, 32767
+  ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask16_load(ptr %val) nounwind {
+; CHECK-LABEL: bzhi64_constant_mask16_load:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    movs r1, #0
+; CHECK-NEXT:    bfc r0, #15, #17
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi64_constant_mask16_load:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ldr r0, [r0]
+; V7A-NEXT:    mov r1, #0
+; V7A-NEXT:    bfc r0, #15, #17
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi64_constant_mask16_load:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    ldr r0, [r0]
+; V7A-T-NEXT:    movs r1, #0
+; V7A-T-NEXT:    bfc r0, #15, #17
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi64_constant_mask16_load:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    ldr r1, [r0]
+; V6M-NEXT:    ldr r0, .LCPI50_0
+; V6M-NEXT:    ands r0, r1
+; V6M-NEXT:    movs r1, #0
+; V6M-NEXT:    bx lr
+; V6M-NEXT:    .p2align 2
+; V6M-NEXT:  @ %bb.1:
+; V6M-NEXT:  .LCPI50_0:
+; V6M-NEXT:    .long 32767 @ 0x7fff
+  %val1 = load i64, ptr %val
+  %masked = and i64 %val1, 32767
+  ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask8(i64 %val) nounwind {
+; CHECK-LABEL: bzhi64_constant_mask8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    and r0, r0, #127
+; CHECK-NEXT:    movs r1, #0
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi64_constant_mask8:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    and r0, r0, #127
+; V7A-NEXT:    mov r1, #0
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi64_constant_mask8:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    and r0, r0, #127
+; V7A-T-NEXT:    movs r1, #0
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi64_constant_mask8:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r1, #127
+; V6M-NEXT:    ands r0, r1
+; V6M-NEXT:    movs r1, #0
+; V6M-NEXT:    bx lr
+  %masked = and i64 %val, 127
+  ret i64 %masked
+}
+
+define i64 @bzhi64_constant_mask8_load(ptr %val) nounwind {
+; CHECK-LABEL: bzhi64_constant_mask8_load:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    movs r1, #0
+; CHECK-NEXT:    and r0, r0, #127
+; CHECK-NEXT:    bx lr
+;
+; V7A-LABEL: bzhi64_constant_mask8_load:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ldr r0, [r0]
+; V7A-NEXT:    mov r1, #0
+; V7A-NEXT:    and r0, r0, #127
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bzhi64_constant_mask8_load:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    ldr r0, [r0]
+; V7A-T-NEXT:    movs r1, #0
+; V7A-T-NEXT:    and r0, r0, #127
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bzhi64_constant_mask8_load:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    ldr r1, [r0]
+; V6M-NEXT:    movs r0, #127
+; V6M-NEXT:    ands r0, r1
+; V6M-NEXT:    movs r1, #0
+; V6M-NEXT:    bx lr
+  %val1 = load i64, ptr %val
+  %masked = and i64 %val1, 127
+  ret i64 %masked
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; V7M: {{.*}}

>From 94fb424a519de9d052a5fb0810dc3e7a809a262e Mon Sep 17 00:00:00 2001
From: AZero13 <gfunni234 at gmail.com>
Date: Thu, 11 Sep 2025 09:14:25 -0400
Subject: [PATCH 2/2] [ARM] shouldFoldMaskToVariableShiftPair should be true
 for scalars up to the biggest legal type

For ARM, we want to do this fold for scalar types up to the biggest legal type, i32; wider values are split across register pairs, where the shift-pair form expands into a much longer sequence.
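
Concretely, the fold rewrites the canonical bit-clearing mask into a variable
shift pair, so no all-ones constant has to be materialized. A minimal IR
sketch of the i32 case exercised by the tests below (the %x/%y value names
are illustrative, not taken from the tests):

  ; mask form: clear the low %y bits of %x
  %mask    = shl i32 -1, %y
  %masked  = and i32 %mask, %x

  ; shift-pair form, emitted when the hook returns true
  %cleared = lshr i32 %x, %y
  %masked2 = shl i32 %cleared, %y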
---
 llvm/lib/Target/ARM/ARMISelLowering.h      |   10 +
 llvm/test/CodeGen/ARM/and-mask-variable.ll |   20 +-
 llvm/test/CodeGen/ARM/extract-bits.ll      | 1221 ++++++++++++++++++--
 llvm/test/CodeGen/ARM/extract-lowbits.ll   |  671 ++++++++++-
 4 files changed, 1776 insertions(+), 146 deletions(-)

diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index 196ecb1b9f678..fa8cc9d21c5e5 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -773,6 +773,16 @@ class VectorType;
     bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                            CombineLevel Level) const override;
 
+    /// Return true if it is profitable to fold a mask into a variable shift pair.
+    bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override {
+      EVT VT = Y.getValueType();
+
+      if (VT.isVector())
+        return false;
+
+      return VT.getScalarSizeInBits() <= 32;
+    }
+
     bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT,
                                               unsigned SelectOpcode, SDValue X,
                                               SDValue Y) const override;
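
The same predicate should also gate the high-bit-clearing dual, which the
bzhi*_c* (mask) and bzhi*_d* (shift pair) tests in extract-lowbits.ll above
exercise; a sketch under the same illustrative naming:

  ; mask form: keep only the low bits of %val
  %mask    = lshr i32 -1, %numhighbits
  %masked  = and i32 %mask, %val

  ; shift-pair form
  %hicl    = shl i32 %val, %numhighbits
  %masked2 = lshr i32 %hicl, %numhighbits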
diff --git a/llvm/test/CodeGen/ARM/and-mask-variable.ll b/llvm/test/CodeGen/ARM/and-mask-variable.ll
index 0b57fe278bf6e..0f84b76f97a6b 100644
--- a/llvm/test/CodeGen/ARM/and-mask-variable.ll
+++ b/llvm/test/CodeGen/ARM/and-mask-variable.ll
@@ -7,30 +7,26 @@
 define i32 @mask_pair(i32 %x, i32 %y) {
 ; V7M-LABEL: mask_pair:
 ; V7M:       @ %bb.0:
-; V7M-NEXT:    mov.w r2, #-1
-; V7M-NEXT:    lsl.w r1, r2, r1
-; V7M-NEXT:    ands r0, r1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    lsls r0, r1
 ; V7M-NEXT:    bx lr
 ;
 ; V7A-LABEL: mask_pair:
 ; V7A:       @ %bb.0:
-; V7A-NEXT:    mvn r2, #0
-; V7A-NEXT:    and r0, r0, r2, lsl r1
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    lsl r0, r0, r1
 ; V7A-NEXT:    bx lr
 ;
 ; V7A-T-LABEL: mask_pair:
 ; V7A-T:       @ %bb.0:
-; V7A-T-NEXT:    mov.w r2, #-1
-; V7A-T-NEXT:    lsl.w r1, r2, r1
-; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    lsls r0, r1
 ; V7A-T-NEXT:    bx lr
 ;
 ; V6M-LABEL: mask_pair:
 ; V6M:       @ %bb.0:
-; V6M-NEXT:    movs r2, #0
-; V6M-NEXT:    mvns r2, r2
-; V6M-NEXT:    lsls r2, r1
-; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    lsls r0, r1
 ; V6M-NEXT:    bx lr
   %shl = shl nsw i32 -1, %y
   %and = and i32 %shl, %x
diff --git a/llvm/test/CodeGen/ARM/extract-bits.ll b/llvm/test/CodeGen/ARM/extract-bits.ll
index 86fc0d3d3781a..237c8bc7d2906 100644
--- a/llvm/test/CodeGen/ARM/extract-bits.ll
+++ b/llvm/test/CodeGen/ARM/extract-bits.ll
@@ -31,6 +31,15 @@ define i32 @bextr32_a0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
 ; CHECK-NEXT:    ands r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr32_a0:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    movs r1, #1
+; V7M-NEXT:    lsls r1, r2
+; V7M-NEXT:    subs r1, #1
+; V7M-NEXT:    ands r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr32_a0:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    mov r12, #1
@@ -73,6 +82,15 @@ define i32 @bextr32_a0_arithmetic(i32 %val, i32 %numskipbits, i32 %numlowbits) n
 ; CHECK-NEXT:    ands r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr32_a0_arithmetic:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    asrs r0, r1
+; V7M-NEXT:    movs r1, #1
+; V7M-NEXT:    lsls r1, r2
+; V7M-NEXT:    subs r1, #1
+; V7M-NEXT:    ands r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr32_a0_arithmetic:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    mov r12, #1
@@ -115,6 +133,15 @@ define i32 @bextr32_a1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %
 ; CHECK-NEXT:    ands r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr32_a1_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    movs r1, #1
+; V7M-NEXT:    lsls r1, r2
+; V7M-NEXT:    subs r1, #1
+; V7M-NEXT:    ands r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr32_a1_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    mov r12, #1
@@ -160,6 +187,16 @@ define i32 @bextr32_a2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind
 ; CHECK-NEXT:    ands r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr32_a2_load:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    ldr r0, [r0]
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    movs r1, #1
+; V7M-NEXT:    lsls r1, r2
+; V7M-NEXT:    subs r1, #1
+; V7M-NEXT:    ands r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr32_a2_load:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    ldr r0, [r0]
@@ -207,6 +244,16 @@ define i32 @bextr32_a3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroex
 ; CHECK-NEXT:    ands r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr32_a3_load_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    ldr r0, [r0]
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    movs r1, #1
+; V7M-NEXT:    lsls r1, r2
+; V7M-NEXT:    subs r1, #1
+; V7M-NEXT:    ands r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr32_a3_load_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    ldr r0, [r0]
@@ -255,6 +302,15 @@ define i32 @bextr32_a4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits)
 ; CHECK-NEXT:    ands r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr32_a4_commutative:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    movs r1, #1
+; V7M-NEXT:    lsls r1, r2
+; V7M-NEXT:    subs r1, #1
+; V7M-NEXT:    ands r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr32_a4_commutative:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    mov r12, #1
@@ -320,6 +376,36 @@ define i64 @bextr64_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
 ; CHECK-NEXT:    and.w r1, r1, r12
 ; CHECK-NEXT:    pop {r4, pc}
 ;
+; V7M-LABEL: bextr64_a0:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    .save {r4, lr}
+; V7M-NEXT:    push {r4, lr}
+; V7M-NEXT:    ldr.w r12, [sp, #8]
+; V7M-NEXT:    mov.w lr, #1
+; V7M-NEXT:    lsrs r0, r2
+; V7M-NEXT:    rsb.w r4, r12, #32
+; V7M-NEXT:    subs.w r3, r12, #32
+; V7M-NEXT:    lsr.w r4, lr, r4
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r4, lr, r3
+; V7M-NEXT:    lsl.w r3, lr, r12
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r3, #0
+; V7M-NEXT:    subs r3, #1
+; V7M-NEXT:    sbc r12, r4, #0
+; V7M-NEXT:    rsb.w r4, r2, #32
+; V7M-NEXT:    lsl.w r4, r1, r4
+; V7M-NEXT:    orrs r0, r4
+; V7M-NEXT:    subs.w r4, r2, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r1, r4
+; V7M-NEXT:    lsr.w r1, r1, r2
+; V7M-NEXT:    and.w r0, r0, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    and.w r1, r1, r12
+; V7M-NEXT:    pop {r4, pc}
+;
 ; V7A-LABEL: bextr64_a0:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r4, lr}
@@ -438,6 +524,36 @@ define i64 @bextr64_a0_arithmetic(i64 %val, i64 %numskipbits, i64 %numlowbits) n
 ; CHECK-NEXT:    and.w r1, r12, r2
 ; CHECK-NEXT:    pop {r4, pc}
 ;
+; V7M-LABEL: bextr64_a0_arithmetic:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    .save {r4, lr}
+; V7M-NEXT:    push {r4, lr}
+; V7M-NEXT:    ldr.w r12, [sp, #8]
+; V7M-NEXT:    mov.w lr, #1
+; V7M-NEXT:    lsrs r0, r2
+; V7M-NEXT:    rsb.w r4, r12, #32
+; V7M-NEXT:    subs.w r3, r12, #32
+; V7M-NEXT:    lsr.w r4, lr, r4
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r4, lr, r3
+; V7M-NEXT:    lsl.w r3, lr, r12
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r3, #0
+; V7M-NEXT:    subs r3, #1
+; V7M-NEXT:    sbc r12, r4, #0
+; V7M-NEXT:    rsb.w r4, r2, #32
+; V7M-NEXT:    lsl.w r4, r1, r4
+; V7M-NEXT:    orrs r0, r4
+; V7M-NEXT:    subs.w r4, r2, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    asrpl.w r0, r1, r4
+; V7M-NEXT:    asr.w r2, r1, r2
+; V7M-NEXT:    and.w r0, r0, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    asrpl r2, r1, #31
+; V7M-NEXT:    and.w r1, r12, r2
+; V7M-NEXT:    pop {r4, pc}
+;
 ; V7A-LABEL: bextr64_a0_arithmetic:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r4, lr}
@@ -555,6 +671,35 @@ define i64 @bextr64_a1_indexzext(i64 %val, i8 zeroext %numskipbits, i8 zeroext %
 ; CHECK-NEXT:    and.w r1, r1, r12
 ; CHECK-NEXT:    pop {r4, pc}
 ;
+; V7M-LABEL: bextr64_a1_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    .save {r4, lr}
+; V7M-NEXT:    push {r4, lr}
+; V7M-NEXT:    rsb.w r4, r3, #32
+; V7M-NEXT:    mov.w lr, #1
+; V7M-NEXT:    subs.w r12, r3, #32
+; V7M-NEXT:    lsl.w r3, lr, r3
+; V7M-NEXT:    lsr.w r4, lr, r4
+; V7M-NEXT:    lsr.w r0, r0, r2
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r4, lr, r12
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r3, #0
+; V7M-NEXT:    subs r3, #1
+; V7M-NEXT:    sbc r12, r4, #0
+; V7M-NEXT:    rsb.w r4, r2, #32
+; V7M-NEXT:    lsl.w r4, r1, r4
+; V7M-NEXT:    orrs r0, r4
+; V7M-NEXT:    subs.w r4, r2, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r1, r4
+; V7M-NEXT:    lsr.w r1, r1, r2
+; V7M-NEXT:    and.w r0, r0, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    and.w r1, r1, r12
+; V7M-NEXT:    pop {r4, pc}
+;
 ; V7A-LABEL: bextr64_a1_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r4, lr}
@@ -674,6 +819,37 @@ define i64 @bextr64_a2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind
 ; CHECK-NEXT:    and.w r1, r1, r12
 ; CHECK-NEXT:    pop {r7, pc}
 ;
+; V7M-LABEL: bextr64_a2_load:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    .save {r7, lr}
+; V7M-NEXT:    push {r7, lr}
+; V7M-NEXT:    ldr.w r12, [sp, #8]
+; V7M-NEXT:    mov.w lr, #1
+; V7M-NEXT:    rsb.w r1, r12, #32
+; V7M-NEXT:    subs.w r3, r12, #32
+; V7M-NEXT:    lsr.w r1, lr, r1
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r1, lr, r3
+; V7M-NEXT:    lsl.w r3, lr, r12
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r3, #0
+; V7M-NEXT:    subs.w lr, r3, #1
+; V7M-NEXT:    ldrd r0, r3, [r0]
+; V7M-NEXT:    sbc r12, r1, #0
+; V7M-NEXT:    rsb.w r1, r2, #32
+; V7M-NEXT:    lsl.w r1, r3, r1
+; V7M-NEXT:    lsrs r0, r2
+; V7M-NEXT:    orrs r0, r1
+; V7M-NEXT:    subs.w r1, r2, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r3, r1
+; V7M-NEXT:    lsr.w r1, r3, r2
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    and.w r0, r0, lr
+; V7M-NEXT:    and.w r1, r1, r12
+; V7M-NEXT:    pop {r7, pc}
+;
 ; V7A-LABEL: bextr64_a2_load:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r4, r5, r6, lr}
@@ -794,6 +970,36 @@ define i64 @bextr64_a3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroex
 ; CHECK-NEXT:    and.w r1, r1, r12
 ; CHECK-NEXT:    pop {r7, pc}
 ;
+; V7M-LABEL: bextr64_a3_load_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    .save {r7, lr}
+; V7M-NEXT:    push {r7, lr}
+; V7M-NEXT:    rsb.w r3, r2, #32
+; V7M-NEXT:    mov.w r12, #1
+; V7M-NEXT:    subs.w lr, r2, #32
+; V7M-NEXT:    lsl.w r2, r12, r2
+; V7M-NEXT:    lsr.w r3, r12, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r3, r12, lr
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r2, #0
+; V7M-NEXT:    subs.w lr, r2, #1
+; V7M-NEXT:    ldrd r0, r2, [r0]
+; V7M-NEXT:    sbc r12, r3, #0
+; V7M-NEXT:    rsb.w r3, r1, #32
+; V7M-NEXT:    lsl.w r3, r2, r3
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    orrs r0, r3
+; V7M-NEXT:    subs.w r3, r1, #32
+; V7M-NEXT:    lsr.w r1, r2, r1
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r2, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    and.w r0, r0, lr
+; V7M-NEXT:    and.w r1, r1, r12
+; V7M-NEXT:    pop {r7, pc}
+;
 ; V7A-LABEL: bextr64_a3_load_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r4, r5, r6, lr}
@@ -915,6 +1121,36 @@ define i64 @bextr64_a4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits)
 ; CHECK-NEXT:    and.w r1, r1, r12
 ; CHECK-NEXT:    pop {r4, pc}
 ;
+; V7M-LABEL: bextr64_a4_commutative:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    .save {r4, lr}
+; V7M-NEXT:    push {r4, lr}
+; V7M-NEXT:    ldr.w r12, [sp, #8]
+; V7M-NEXT:    mov.w lr, #1
+; V7M-NEXT:    lsrs r0, r2
+; V7M-NEXT:    rsb.w r4, r12, #32
+; V7M-NEXT:    subs.w r3, r12, #32
+; V7M-NEXT:    lsr.w r4, lr, r4
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r4, lr, r3
+; V7M-NEXT:    lsl.w r3, lr, r12
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r3, #0
+; V7M-NEXT:    subs r3, #1
+; V7M-NEXT:    sbc r12, r4, #0
+; V7M-NEXT:    rsb.w r4, r2, #32
+; V7M-NEXT:    lsl.w r4, r1, r4
+; V7M-NEXT:    orrs r0, r4
+; V7M-NEXT:    subs.w r4, r2, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r1, r4
+; V7M-NEXT:    lsr.w r1, r1, r2
+; V7M-NEXT:    and.w r0, r0, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    and.w r1, r1, r12
+; V7M-NEXT:    pop {r4, pc}
+;
 ; V7A-LABEL: bextr64_a4_commutative:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r4, lr}
@@ -1025,6 +1261,25 @@ define i32 @bextr64_32_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind
 ; CHECK-NEXT:    ands r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr64_32_a0:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    rsb.w r3, r2, #32
+; V7M-NEXT:    lsrs r0, r2
+; V7M-NEXT:    subs r2, #32
+; V7M-NEXT:    lsl.w r3, r1, r3
+; V7M-NEXT:    orr.w r0, r0, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r1, r2
+; V7M-NEXT:    ldr r1, [sp]
+; V7M-NEXT:    movs r2, #1
+; V7M-NEXT:    lsls r2, r1
+; V7M-NEXT:    subs r1, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r2, #0
+; V7M-NEXT:    subs r1, r2, #1
+; V7M-NEXT:    ands r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr64_32_a0:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    rsb r3, r2, #32
@@ -1099,6 +1354,22 @@ define i32 @bextr64_32_a1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind
 ; CHECK-NEXT:    ands r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr64_32_a1:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    rsb.w r3, r2, #32
+; V7M-NEXT:    lsrs r0, r2
+; V7M-NEXT:    subs r2, #32
+; V7M-NEXT:    lsl.w r3, r1, r3
+; V7M-NEXT:    orr.w r0, r0, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r1, r2
+; V7M-NEXT:    ldr r1, [sp]
+; V7M-NEXT:    movs r2, #1
+; V7M-NEXT:    lsl.w r1, r2, r1
+; V7M-NEXT:    subs r1, #1
+; V7M-NEXT:    ands r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr64_32_a1:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r11, lr}
@@ -1169,6 +1440,22 @@ define i32 @bextr64_32_a2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind
 ; CHECK-NEXT:    ands r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr64_32_a2:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    rsb.w r3, r2, #32
+; V7M-NEXT:    lsrs r0, r2
+; V7M-NEXT:    subs r2, #32
+; V7M-NEXT:    lsl.w r3, r1, r3
+; V7M-NEXT:    orr.w r0, r0, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r1, r2
+; V7M-NEXT:    ldr r1, [sp]
+; V7M-NEXT:    movs r2, #1
+; V7M-NEXT:    lsl.w r1, r2, r1
+; V7M-NEXT:    subs r1, #1
+; V7M-NEXT:    ands r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr64_32_a2:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r11, lr}
@@ -1234,6 +1521,14 @@ define i32 @bextr32_b0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
 ; CHECK-NEXT:    bics r0, r2
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr32_b0:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    mov.w r3, #-1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    lsl.w r2, r3, r2
+; V7M-NEXT:    bics r0, r2
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr32_b0:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    lsr r0, r0, r1
@@ -1273,6 +1568,14 @@ define i32 @bextr32_b1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %
 ; CHECK-NEXT:    bics r0, r2
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr32_b1_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    mov.w r3, #-1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    lsl.w r2, r3, r2
+; V7M-NEXT:    bics r0, r2
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr32_b1_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    lsr r0, r0, r1
@@ -1315,6 +1618,15 @@ define i32 @bextr32_b2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind
 ; CHECK-NEXT:    bics r0, r2
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr32_b2_load:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    ldr r0, [r0]
+; V7M-NEXT:    mov.w r3, #-1
+; V7M-NEXT:    lsl.w r2, r3, r2
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    bics r0, r2
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr32_b2_load:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    ldr r0, [r0]
@@ -1359,6 +1671,15 @@ define i32 @bextr32_b3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroex
 ; CHECK-NEXT:    bics r0, r2
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr32_b3_load_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    ldr r0, [r0]
+; V7M-NEXT:    mov.w r3, #-1
+; V7M-NEXT:    lsl.w r2, r3, r2
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    bics r0, r2
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr32_b3_load_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    ldr r0, [r0]
@@ -1404,6 +1725,14 @@ define i32 @bextr32_b4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits)
 ; CHECK-NEXT:    bics r0, r2
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr32_b4_commutative:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    mov.w r3, #-1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    lsl.w r2, r3, r2
+; V7M-NEXT:    bics r0, r2
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr32_b4_commutative:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    lsr r0, r0, r1
@@ -1463,6 +1792,32 @@ define i64 @bextr64_b0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
 ; CHECK-NEXT:    bics r0, r3
 ; CHECK-NEXT:    pop {r7, pc}
 ;
+; V7M-LABEL: bextr64_b0:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    .save {r7, lr}
+; V7M-NEXT:    push {r7, lr}
+; V7M-NEXT:    rsb.w r3, r2, #32
+; V7M-NEXT:    lsrs r0, r2
+; V7M-NEXT:    ldr.w r12, [sp, #8]
+; V7M-NEXT:    lsl.w r3, r1, r3
+; V7M-NEXT:    orrs r0, r3
+; V7M-NEXT:    subs.w r3, r2, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r1, r3
+; V7M-NEXT:    lsr.w r1, r1, r2
+; V7M-NEXT:    mov.w r2, #-1
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    lsl.w r3, r2, r12
+; V7M-NEXT:    subs.w lr, r12, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r2, r2, lr
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r3, #0
+; V7M-NEXT:    bics r1, r2
+; V7M-NEXT:    bics r0, r3
+; V7M-NEXT:    pop {r7, pc}
+;
 ; V7A-LABEL: bextr64_b0:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r11, lr}
@@ -1557,6 +1912,29 @@ define i64 @bextr64_b1_indexzext(i64 %val, i8 zeroext %numskipbits, i8 zeroext %
 ; CHECK-NEXT:    bic.w r0, r12, r3
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr64_b1_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    lsr.w r12, r0, r2
+; V7M-NEXT:    rsb.w r0, r2, #32
+; V7M-NEXT:    lsl.w r0, r1, r0
+; V7M-NEXT:    orr.w r12, r12, r0
+; V7M-NEXT:    subs.w r0, r2, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r12, r1, r0
+; V7M-NEXT:    lsr.w r0, r1, r2
+; V7M-NEXT:    mov.w r2, #-1
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r0, #0
+; V7M-NEXT:    subs.w r1, r3, #32
+; V7M-NEXT:    lsl.w r3, r2, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl r2, r1
+; V7M-NEXT:    bic.w r1, r0, r2
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r3, #0
+; V7M-NEXT:    bic.w r0, r12, r3
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr64_b1_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    lsr r12, r0, r2
@@ -1653,6 +2031,33 @@ define i64 @bextr64_b2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind
 ; CHECK-NEXT:    bics r0, r3
 ; CHECK-NEXT:    pop {r7, pc}
 ;
+; V7M-LABEL: bextr64_b2_load:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    .save {r7, lr}
+; V7M-NEXT:    push {r7, lr}
+; V7M-NEXT:    ldrd r0, r3, [r0]
+; V7M-NEXT:    rsb.w r1, r2, #32
+; V7M-NEXT:    ldr.w r12, [sp, #8]
+; V7M-NEXT:    lsl.w r1, r3, r1
+; V7M-NEXT:    lsrs r0, r2
+; V7M-NEXT:    orrs r0, r1
+; V7M-NEXT:    subs.w r1, r2, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r3, r1
+; V7M-NEXT:    lsr.w r1, r3, r2
+; V7M-NEXT:    mov.w r2, #-1
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    lsl.w r3, r2, r12
+; V7M-NEXT:    subs.w lr, r12, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r2, r2, lr
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r3, #0
+; V7M-NEXT:    bics r1, r2
+; V7M-NEXT:    bics r0, r3
+; V7M-NEXT:    pop {r7, pc}
+;
 ; V7A-LABEL: bextr64_b2_load:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r11, lr}
@@ -1757,6 +2162,32 @@ define i64 @bextr64_b3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroex
 ; CHECK-NEXT:    bic.w r0, r12, r2
 ; CHECK-NEXT:    pop {r7, pc}
 ;
+; V7M-LABEL: bextr64_b3_load_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    .save {r7, lr}
+; V7M-NEXT:    push {r7, lr}
+; V7M-NEXT:    ldrd r12, r0, [r0]
+; V7M-NEXT:    rsb.w r3, r1, #32
+; V7M-NEXT:    lsl.w lr, r0, r3
+; V7M-NEXT:    lsr.w r3, r12, r1
+; V7M-NEXT:    orr.w r12, r3, lr
+; V7M-NEXT:    subs.w r3, r1, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r12, r0, r3
+; V7M-NEXT:    lsr.w r0, r0, r1
+; V7M-NEXT:    mov.w r3, #-1
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r0, #0
+; V7M-NEXT:    subs.w r1, r2, #32
+; V7M-NEXT:    lsl.w r2, r3, r2
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl r3, r1
+; V7M-NEXT:    bic.w r1, r0, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r2, #0
+; V7M-NEXT:    bic.w r0, r12, r2
+; V7M-NEXT:    pop {r7, pc}
+;
 ; V7A-LABEL: bextr64_b3_load_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    ldm r0, {r0, r3}
@@ -1861,6 +2292,32 @@ define i64 @bextr64_b4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits)
 ; CHECK-NEXT:    bics r0, r3
 ; CHECK-NEXT:    pop {r7, pc}
 ;
+; V7M-LABEL: bextr64_b4_commutative:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    .save {r7, lr}
+; V7M-NEXT:    push {r7, lr}
+; V7M-NEXT:    rsb.w r3, r2, #32
+; V7M-NEXT:    lsrs r0, r2
+; V7M-NEXT:    ldr.w r12, [sp, #8]
+; V7M-NEXT:    lsl.w r3, r1, r3
+; V7M-NEXT:    orrs r0, r3
+; V7M-NEXT:    subs.w r3, r2, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r1, r3
+; V7M-NEXT:    lsr.w r1, r1, r2
+; V7M-NEXT:    mov.w r2, #-1
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    lsl.w r3, r2, r12
+; V7M-NEXT:    subs.w lr, r12, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r2, r2, lr
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r3, #0
+; V7M-NEXT:    bics r1, r2
+; V7M-NEXT:    bics r0, r3
+; V7M-NEXT:    pop {r7, pc}
+;
 ; V7A-LABEL: bextr64_b4_commutative:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r11, lr}
@@ -1953,6 +2410,24 @@ define i32 @bextr64_32_b0(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
 ; CHECK-NEXT:    bics r0, r2
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr64_32_b0:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    rsb.w r3, r2, #32
+; V7M-NEXT:    lsrs r0, r2
+; V7M-NEXT:    subs r2, #32
+; V7M-NEXT:    lsl.w r3, r1, r3
+; V7M-NEXT:    orr.w r0, r0, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r1, r2
+; V7M-NEXT:    ldrb.w r1, [sp]
+; V7M-NEXT:    mov.w r2, #-1
+; V7M-NEXT:    lsls r2, r1
+; V7M-NEXT:    subs r1, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r2, #0
+; V7M-NEXT:    bics r0, r2
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr64_32_b0:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    rsb r3, r2, #32
@@ -2027,6 +2502,21 @@ define i32 @bextr64_32_b1(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
 ; CHECK-NEXT:    bics r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr64_32_b1:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    rsb.w r3, r2, #32
+; V7M-NEXT:    lsrs r0, r2
+; V7M-NEXT:    subs r2, #32
+; V7M-NEXT:    lsl.w r3, r1, r3
+; V7M-NEXT:    orr.w r0, r0, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r1, r2
+; V7M-NEXT:    ldrb.w r1, [sp]
+; V7M-NEXT:    mov.w r2, #-1
+; V7M-NEXT:    lsl.w r1, r2, r1
+; V7M-NEXT:    bics r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr64_32_b1:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    rsb r3, r2, #32
@@ -2093,6 +2583,21 @@ define i32 @bextr64_32_b2(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
 ; CHECK-NEXT:    bics r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr64_32_b2:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    rsb.w r3, r2, #32
+; V7M-NEXT:    lsrs r0, r2
+; V7M-NEXT:    subs r2, #32
+; V7M-NEXT:    lsl.w r3, r1, r3
+; V7M-NEXT:    orr.w r0, r0, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r1, r2
+; V7M-NEXT:    ldrb.w r1, [sp]
+; V7M-NEXT:    mov.w r2, #-1
+; V7M-NEXT:    lsl.w r1, r2, r1
+; V7M-NEXT:    bics r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr64_32_b2:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    rsb r3, r2, #32
@@ -2156,32 +2661,37 @@ define i32 @bextr32_c0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
 ; CHECK-NEXT:    ands r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr32_c0:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    rsb.w r1, r2, #32
+; V7M-NEXT:    lsls r0, r1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr32_c0:
 ; V7A:       @ %bb.0:
-; V7A-NEXT:    rsb r2, r2, #32
-; V7A-NEXT:    mvn r3, #0
-; V7A-NEXT:    lsr r2, r3, r2
-; V7A-NEXT:    and r0, r2, r0, lsr r1
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    rsb r1, r2, #32
+; V7A-NEXT:    lsl r0, r0, r1
+; V7A-NEXT:    lsr r0, r0, r1
 ; V7A-NEXT:    bx lr
 ;
 ; V7A-T-LABEL: bextr32_c0:
 ; V7A-T:       @ %bb.0:
 ; V7A-T-NEXT:    lsrs r0, r1
 ; V7A-T-NEXT:    rsb.w r1, r2, #32
-; V7A-T-NEXT:    mov.w r2, #-1
-; V7A-T-NEXT:    lsr.w r1, r2, r1
-; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    lsls r0, r1
+; V7A-T-NEXT:    lsrs r0, r1
 ; V7A-T-NEXT:    bx lr
 ;
 ; V6M-LABEL: bextr32_c0:
 ; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r3, #32
+; V6M-NEXT:    subs r2, r3, r2
 ; V6M-NEXT:    lsrs r0, r1
-; V6M-NEXT:    movs r1, #32
-; V6M-NEXT:    subs r1, r1, r2
-; V6M-NEXT:    movs r2, #0
-; V6M-NEXT:    mvns r2, r2
-; V6M-NEXT:    lsrs r2, r1
-; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    lsls r0, r2
+; V6M-NEXT:    lsrs r0, r2
 ; V6M-NEXT:    bx lr
   %shifted = lshr i32 %val, %numskipbits
   %numhighbits = sub i32 32, %numlowbits
@@ -2202,14 +2712,24 @@ define i32 @bextr32_c1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) noun
 ; CHECK-NEXT:    ands r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr32_c1_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    uxtb r1, r1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    rsb.w r1, r2, #32
+; V7M-NEXT:    uxtb r1, r1
+; V7M-NEXT:    lsls r0, r1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr32_c1_indexzext:
 ; V7A:       @ %bb.0:
-; V7A-NEXT:    rsb r2, r2, #32
-; V7A-NEXT:    mvn r3, #0
 ; V7A-NEXT:    uxtb r1, r1
-; V7A-NEXT:    uxtb r2, r2
-; V7A-NEXT:    lsr r2, r3, r2
-; V7A-NEXT:    and r0, r2, r0, lsr r1
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    rsb r1, r2, #32
+; V7A-NEXT:    uxtb r1, r1
+; V7A-NEXT:    lsl r0, r0, r1
+; V7A-NEXT:    lsr r0, r0, r1
 ; V7A-NEXT:    bx lr
 ;
 ; V7A-T-LABEL: bextr32_c1_indexzext:
@@ -2217,10 +2737,9 @@ define i32 @bextr32_c1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) noun
 ; V7A-T-NEXT:    uxtb r1, r1
 ; V7A-T-NEXT:    lsrs r0, r1
 ; V7A-T-NEXT:    rsb.w r1, r2, #32
-; V7A-T-NEXT:    mov.w r2, #-1
 ; V7A-T-NEXT:    uxtb r1, r1
-; V7A-T-NEXT:    lsr.w r1, r2, r1
-; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    lsls r0, r1
+; V7A-T-NEXT:    lsrs r0, r1
 ; V7A-T-NEXT:    bx lr
 ;
 ; V6M-LABEL: bextr32_c1_indexzext:
@@ -2230,10 +2749,8 @@ define i32 @bextr32_c1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) noun
 ; V6M-NEXT:    movs r1, #32
 ; V6M-NEXT:    subs r1, r1, r2
 ; V6M-NEXT:    uxtb r1, r1
-; V6M-NEXT:    movs r2, #0
-; V6M-NEXT:    mvns r2, r2
-; V6M-NEXT:    lsrs r2, r1
-; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    lsls r0, r1
+; V6M-NEXT:    lsrs r0, r1
 ; V6M-NEXT:    bx lr
   %skip = zext i8 %numskipbits to i32
   %shifted = lshr i32 %val, %skip
@@ -2255,35 +2772,41 @@ define i32 @bextr32_c2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind
 ; CHECK-NEXT:    ands r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr32_c2_load:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    ldr r0, [r0]
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    rsb.w r1, r2, #32
+; V7M-NEXT:    lsls r0, r1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr32_c2_load:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    ldr r0, [r0]
-; V7A-NEXT:    rsb r2, r2, #32
-; V7A-NEXT:    mvn r3, #0
-; V7A-NEXT:    lsr r2, r3, r2
-; V7A-NEXT:    and r0, r2, r0, lsr r1
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    rsb r1, r2, #32
+; V7A-NEXT:    lsl r0, r0, r1
+; V7A-NEXT:    lsr r0, r0, r1
 ; V7A-NEXT:    bx lr
 ;
 ; V7A-T-LABEL: bextr32_c2_load:
 ; V7A-T:       @ %bb.0:
 ; V7A-T-NEXT:    ldr r0, [r0]
-; V7A-T-NEXT:    rsb.w r2, r2, #32
-; V7A-T-NEXT:    mov.w r3, #-1
-; V7A-T-NEXT:    lsr.w r2, r3, r2
 ; V7A-T-NEXT:    lsrs r0, r1
-; V7A-T-NEXT:    ands r0, r2
+; V7A-T-NEXT:    rsb.w r1, r2, #32
+; V7A-T-NEXT:    lsls r0, r1
+; V7A-T-NEXT:    lsrs r0, r1
 ; V7A-T-NEXT:    bx lr
 ;
 ; V6M-LABEL: bextr32_c2_load:
 ; V6M:       @ %bb.0:
-; V6M-NEXT:    ldr r3, [r0]
-; V6M-NEXT:    lsrs r3, r1
-; V6M-NEXT:    movs r0, #32
-; V6M-NEXT:    subs r1, r0, r2
-; V6M-NEXT:    movs r0, #0
-; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    movs r3, #32
+; V6M-NEXT:    subs r2, r3, r2
+; V6M-NEXT:    ldr r0, [r0]
 ; V6M-NEXT:    lsrs r0, r1
-; V6M-NEXT:    ands r0, r3
+; V6M-NEXT:    lsls r0, r2
+; V6M-NEXT:    lsrs r0, r2
 ; V6M-NEXT:    bx lr
   %val = load i32, ptr %w
   %shifted = lshr i32 %val, %numskipbits
@@ -2306,15 +2829,26 @@ define i32 @bextr32_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) n
 ; CHECK-NEXT:    ands r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr32_c3_load_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    ldr r0, [r0]
+; V7M-NEXT:    uxtb r1, r1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    rsb.w r1, r2, #32
+; V7M-NEXT:    uxtb r1, r1
+; V7M-NEXT:    lsls r0, r1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr32_c3_load_indexzext:
 ; V7A:       @ %bb.0:
-; V7A-NEXT:    rsb r2, r2, #32
 ; V7A-NEXT:    ldr r0, [r0]
-; V7A-NEXT:    mvn r3, #0
 ; V7A-NEXT:    uxtb r1, r1
-; V7A-NEXT:    uxtb r2, r2
-; V7A-NEXT:    lsr r2, r3, r2
-; V7A-NEXT:    and r0, r2, r0, lsr r1
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    rsb r1, r2, #32
+; V7A-NEXT:    uxtb r1, r1
+; V7A-NEXT:    lsl r0, r0, r1
+; V7A-NEXT:    lsr r0, r0, r1
 ; V7A-NEXT:    bx lr
 ;
 ; V7A-T-LABEL: bextr32_c3_load_indexzext:
@@ -2323,24 +2857,21 @@ define i32 @bextr32_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) n
 ; V7A-T-NEXT:    uxtb r1, r1
 ; V7A-T-NEXT:    lsrs r0, r1
 ; V7A-T-NEXT:    rsb.w r1, r2, #32
-; V7A-T-NEXT:    mov.w r2, #-1
 ; V7A-T-NEXT:    uxtb r1, r1
-; V7A-T-NEXT:    lsr.w r1, r2, r1
-; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    lsls r0, r1
+; V7A-T-NEXT:    lsrs r0, r1
 ; V7A-T-NEXT:    bx lr
 ;
 ; V6M-LABEL: bextr32_c3_load_indexzext:
 ; V6M:       @ %bb.0:
 ; V6M-NEXT:    uxtb r1, r1
-; V6M-NEXT:    ldr r3, [r0]
-; V6M-NEXT:    lsrs r3, r1
-; V6M-NEXT:    movs r0, #32
-; V6M-NEXT:    subs r0, r0, r2
-; V6M-NEXT:    uxtb r1, r0
-; V6M-NEXT:    movs r0, #0
-; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    ldr r0, [r0]
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    movs r1, #32
+; V6M-NEXT:    subs r1, r1, r2
+; V6M-NEXT:    uxtb r1, r1
+; V6M-NEXT:    lsls r0, r1
 ; V6M-NEXT:    lsrs r0, r1
-; V6M-NEXT:    ands r0, r3
 ; V6M-NEXT:    bx lr
   %val = load i32, ptr %w
   %skip = zext i8 %numskipbits to i32
@@ -2362,32 +2893,37 @@ define i32 @bextr32_c4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits)
 ; CHECK-NEXT:    ands r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr32_c4_commutative:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    rsb.w r1, r2, #32
+; V7M-NEXT:    lsls r0, r1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr32_c4_commutative:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    lsr r0, r0, r1
 ; V7A-NEXT:    rsb r1, r2, #32
-; V7A-NEXT:    mvn r2, #0
-; V7A-NEXT:    and r0, r0, r2, lsr r1
+; V7A-NEXT:    lsl r0, r0, r1
+; V7A-NEXT:    lsr r0, r0, r1
 ; V7A-NEXT:    bx lr
 ;
 ; V7A-T-LABEL: bextr32_c4_commutative:
 ; V7A-T:       @ %bb.0:
 ; V7A-T-NEXT:    lsrs r0, r1
 ; V7A-T-NEXT:    rsb.w r1, r2, #32
-; V7A-T-NEXT:    mov.w r2, #-1
-; V7A-T-NEXT:    lsr.w r1, r2, r1
-; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    lsls r0, r1
+; V7A-T-NEXT:    lsrs r0, r1
 ; V7A-T-NEXT:    bx lr
 ;
 ; V6M-LABEL: bextr32_c4_commutative:
 ; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r3, #32
+; V6M-NEXT:    subs r2, r3, r2
 ; V6M-NEXT:    lsrs r0, r1
-; V6M-NEXT:    movs r1, #32
-; V6M-NEXT:    subs r1, r1, r2
-; V6M-NEXT:    movs r2, #0
-; V6M-NEXT:    mvns r2, r2
-; V6M-NEXT:    lsrs r2, r1
-; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    lsls r0, r2
+; V6M-NEXT:    lsrs r0, r2
 ; V6M-NEXT:    bx lr
   %shifted = lshr i32 %val, %numskipbits
   %numhighbits = sub i32 32, %numlowbits
@@ -2424,6 +2960,31 @@ define i64 @bextr64_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
 ; CHECK-NEXT:    ands r0, r2
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr64_c0:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    rsb.w r3, r2, #32
+; V7M-NEXT:    lsrs r0, r2
+; V7M-NEXT:    ldr.w r12, [sp]
+; V7M-NEXT:    lsl.w r3, r1, r3
+; V7M-NEXT:    orrs r0, r3
+; V7M-NEXT:    subs.w r3, r2, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r1, r3
+; V7M-NEXT:    rsb.w r3, r12, #64
+; V7M-NEXT:    lsr.w r1, r1, r2
+; V7M-NEXT:    mov.w r2, #-1
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    lsr.w r3, r2, r3
+; V7M-NEXT:    rsbs.w r12, r12, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r3, #0
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r2, r2, r12
+; V7M-NEXT:    ands r1, r3
+; V7M-NEXT:    ands r0, r2
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr64_c0:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r4, r5, r11, lr}
@@ -2528,6 +3089,34 @@ define i64 @bextr64_c1_indexzext(i64 %val, i8 %numskipbits, i8 %numlowbits) noun
 ; CHECK-NEXT:    ands r1, r2
 ; CHECK-NEXT:    pop {r7, pc}
 ;
+; V7M-LABEL: bextr64_c1_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    .save {r7, lr}
+; V7M-NEXT:    push {r7, lr}
+; V7M-NEXT:    uxtb r2, r2
+; V7M-NEXT:    lsr.w r12, r0, r2
+; V7M-NEXT:    rsb.w r0, r2, #32
+; V7M-NEXT:    lsl.w r0, r1, r0
+; V7M-NEXT:    orr.w r12, r12, r0
+; V7M-NEXT:    subs.w r0, r2, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r12, r1, r0
+; V7M-NEXT:    rsb.w r0, r3, #64
+; V7M-NEXT:    lsr.w r1, r1, r2
+; V7M-NEXT:    mov.w r3, #-1
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    uxtb r0, r0
+; V7M-NEXT:    subs.w lr, r0, #32
+; V7M-NEXT:    lsr.w r2, r3, r0
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r3, r3, lr
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r2, #0
+; V7M-NEXT:    and.w r0, r3, r12
+; V7M-NEXT:    ands r1, r2
+; V7M-NEXT:    pop {r7, pc}
+;
 ; V7A-LABEL: bextr64_c1_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r4, lr}
@@ -2640,6 +3229,32 @@ define i64 @bextr64_c2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind
 ; CHECK-NEXT:    ands r0, r2
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr64_c2_load:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    ldrd r0, r3, [r0]
+; V7M-NEXT:    rsb.w r1, r2, #32
+; V7M-NEXT:    ldr.w r12, [sp]
+; V7M-NEXT:    lsl.w r1, r3, r1
+; V7M-NEXT:    lsrs r0, r2
+; V7M-NEXT:    orrs r0, r1
+; V7M-NEXT:    subs.w r1, r2, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r3, r1
+; V7M-NEXT:    lsr.w r1, r3, r2
+; V7M-NEXT:    rsb.w r3, r12, #64
+; V7M-NEXT:    mov.w r2, #-1
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    rsbs.w r12, r12, #32
+; V7M-NEXT:    lsr.w r3, r2, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r3, #0
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r2, r2, r12
+; V7M-NEXT:    ands r1, r3
+; V7M-NEXT:    ands r0, r2
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr64_c2_load:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r4, r6, r8, lr}
@@ -2748,6 +3363,35 @@ define i64 @bextr64_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) n
 ; CHECK-NEXT:    ands r1, r2
 ; CHECK-NEXT:    pop {r7, pc}
 ;
+; V7M-LABEL: bextr64_c3_load_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    .save {r7, lr}
+; V7M-NEXT:    push {r7, lr}
+; V7M-NEXT:    ldrd r0, r3, [r0]
+; V7M-NEXT:    uxtb r1, r1
+; V7M-NEXT:    lsr.w r12, r0, r1
+; V7M-NEXT:    rsb.w r0, r1, #32
+; V7M-NEXT:    lsl.w r0, r3, r0
+; V7M-NEXT:    orr.w r12, r12, r0
+; V7M-NEXT:    subs.w r0, r1, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r12, r3, r0
+; V7M-NEXT:    rsb.w r0, r2, #64
+; V7M-NEXT:    lsr.w r1, r3, r1
+; V7M-NEXT:    mov.w r3, #-1
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    uxtb r0, r0
+; V7M-NEXT:    subs.w lr, r0, #32
+; V7M-NEXT:    lsr.w r2, r3, r0
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r3, r3, lr
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r2, #0
+; V7M-NEXT:    and.w r0, r3, r12
+; V7M-NEXT:    ands r1, r2
+; V7M-NEXT:    pop {r7, pc}
+;
 ; V7A-LABEL: bextr64_c3_load_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r4, lr}
@@ -2867,6 +3511,31 @@ define i64 @bextr64_c4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits)
 ; CHECK-NEXT:    ands r0, r2
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr64_c4_commutative:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    rsb.w r3, r2, #32
+; V7M-NEXT:    lsrs r0, r2
+; V7M-NEXT:    ldr.w r12, [sp]
+; V7M-NEXT:    lsl.w r3, r1, r3
+; V7M-NEXT:    orrs r0, r3
+; V7M-NEXT:    subs.w r3, r2, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r1, r3
+; V7M-NEXT:    rsb.w r3, r12, #64
+; V7M-NEXT:    lsr.w r1, r1, r2
+; V7M-NEXT:    mov.w r2, #-1
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    lsr.w r3, r2, r3
+; V7M-NEXT:    rsbs.w r12, r12, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r3, #0
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r2, r2, r12
+; V7M-NEXT:    ands r1, r3
+; V7M-NEXT:    ands r0, r2
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr64_c4_commutative:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r4, r5, r11, lr}
@@ -2963,6 +3632,23 @@ define i32 @bextr64_32_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind
 ; CHECK-NEXT:    ands r0, r2
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr64_32_c0:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    rsb.w r3, r2, #32
+; V7M-NEXT:    lsrs r0, r2
+; V7M-NEXT:    subs r2, #32
+; V7M-NEXT:    lsl.w r3, r1, r3
+; V7M-NEXT:    orr.w r0, r0, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r1, r2
+; V7M-NEXT:    ldr r1, [sp]
+; V7M-NEXT:    mov.w r2, #-1
+; V7M-NEXT:    rsbs.w r1, r1, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl r2, r1
+; V7M-NEXT:    ands r0, r2
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr64_32_c0:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    ldr r3, [sp]
@@ -3035,6 +3721,21 @@ define i32 @bextr64_32_c1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind
 ; CHECK-NEXT:    ands r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr64_32_c1:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    rsb.w r3, r2, #32
+; V7M-NEXT:    lsrs r0, r2
+; V7M-NEXT:    subs r2, #32
+; V7M-NEXT:    lsl.w r3, r1, r3
+; V7M-NEXT:    orr.w r0, r0, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r1, r2
+; V7M-NEXT:    ldr r1, [sp]
+; V7M-NEXT:    rsb.w r1, r1, #32
+; V7M-NEXT:    lsls r0, r1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr64_32_c1:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    rsb r3, r2, #32
@@ -3044,8 +3745,8 @@ define i32 @bextr64_32_c1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind
 ; V7A-NEXT:    orr r0, r0, r1, lsl r3
 ; V7A-NEXT:    lsrpl r0, r1, r2
 ; V7A-NEXT:    rsb r1, r12, #32
-; V7A-NEXT:    mvn r2, #0
-; V7A-NEXT:    and r0, r0, r2, lsr r1
+; V7A-NEXT:    lsl r0, r0, r1
+; V7A-NEXT:    lsr r0, r0, r1
 ; V7A-NEXT:    bx lr
 ;
 ; V7A-T-LABEL: bextr64_32_c1:
@@ -3059,9 +3760,8 @@ define i32 @bextr64_32_c1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind
 ; V7A-T-NEXT:    it pl
 ; V7A-T-NEXT:    lsrpl.w r0, r1, r2
 ; V7A-T-NEXT:    rsb.w r1, r12, #32
-; V7A-T-NEXT:    mov.w r2, #-1
-; V7A-T-NEXT:    lsr.w r1, r2, r1
-; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    lsls r0, r1
+; V7A-T-NEXT:    lsrs r0, r1
 ; V7A-T-NEXT:    bx lr
 ;
 ; V6M-LABEL: bextr64_32_c1:
@@ -3072,10 +3772,8 @@ define i32 @bextr64_32_c1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind
 ; V6M-NEXT:    ldr r1, [sp, #8]
 ; V6M-NEXT:    movs r2, #32
 ; V6M-NEXT:    subs r1, r2, r1
-; V6M-NEXT:    movs r2, #0
-; V6M-NEXT:    mvns r2, r2
-; V6M-NEXT:    lsrs r2, r1
-; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    lsls r0, r1
+; V6M-NEXT:    lsrs r0, r1
 ; V6M-NEXT:    pop {r7, pc}
   %shifted = lshr i64 %val, %numskipbits
   %truncshifted = trunc i64 %shifted to i32
@@ -3104,6 +3802,21 @@ define i32 @bextr64_32_c2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind
 ; CHECK-NEXT:    ands r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr64_32_c2:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    rsb.w r3, r2, #32
+; V7M-NEXT:    lsrs r0, r2
+; V7M-NEXT:    subs r2, #32
+; V7M-NEXT:    lsl.w r3, r1, r3
+; V7M-NEXT:    orr.w r0, r0, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r1, r2
+; V7M-NEXT:    ldr r1, [sp]
+; V7M-NEXT:    rsb.w r1, r1, #32
+; V7M-NEXT:    lsls r0, r1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr64_32_c2:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    rsb r3, r2, #32
@@ -3113,8 +3826,8 @@ define i32 @bextr64_32_c2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind
 ; V7A-NEXT:    orr r0, r0, r1, lsl r3
 ; V7A-NEXT:    lsrpl r0, r1, r2
 ; V7A-NEXT:    rsb r1, r12, #32
-; V7A-NEXT:    mvn r2, #0
-; V7A-NEXT:    and r0, r0, r2, lsr r1
+; V7A-NEXT:    lsl r0, r0, r1
+; V7A-NEXT:    lsr r0, r0, r1
 ; V7A-NEXT:    bx lr
 ;
 ; V7A-T-LABEL: bextr64_32_c2:
@@ -3128,9 +3841,8 @@ define i32 @bextr64_32_c2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind
 ; V7A-T-NEXT:    it pl
 ; V7A-T-NEXT:    lsrpl.w r0, r1, r2
 ; V7A-T-NEXT:    rsb.w r1, r12, #32
-; V7A-T-NEXT:    mov.w r2, #-1
-; V7A-T-NEXT:    lsr.w r1, r2, r1
-; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    lsls r0, r1
+; V7A-T-NEXT:    lsrs r0, r1
 ; V7A-T-NEXT:    bx lr
 ;
 ; V6M-LABEL: bextr64_32_c2:
@@ -3141,10 +3853,8 @@ define i32 @bextr64_32_c2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind
 ; V6M-NEXT:    ldr r1, [sp, #8]
 ; V6M-NEXT:    movs r2, #32
 ; V6M-NEXT:    subs r1, r2, r1
-; V6M-NEXT:    movs r2, #0
-; V6M-NEXT:    mvns r2, r2
-; V6M-NEXT:    lsrs r2, r1
-; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    lsls r0, r1
+; V6M-NEXT:    lsrs r0, r1
 ; V6M-NEXT:    pop {r7, pc}
   %shifted = lshr i64 %val, %numskipbits
   %numhighbits = sub i32 32, %numlowbits
@@ -3168,6 +3878,14 @@ define i32 @bextr32_d0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
 ; CHECK-NEXT:    lsrs r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr32_d0:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    rsb.w r1, r2, #32
+; V7M-NEXT:    lsls r0, r1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr32_d0:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    lsr r0, r0, r1
@@ -3210,6 +3928,16 @@ define i32 @bextr32_d1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) noun
 ; CHECK-NEXT:    lsrs r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr32_d1_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    uxtb r1, r1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    rsb.w r1, r2, #32
+; V7M-NEXT:    uxtb r1, r1
+; V7M-NEXT:    lsls r0, r1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr32_d1_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    uxtb r1, r1
@@ -3259,6 +3987,15 @@ define i32 @bextr32_d2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind
 ; CHECK-NEXT:    lsrs r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr32_d2_load:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    ldr r0, [r0]
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    rsb.w r1, r2, #32
+; V7M-NEXT:    lsls r0, r1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr32_d2_load:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    ldr r0, [r0]
@@ -3306,6 +4043,17 @@ define i32 @bextr32_d3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) n
 ; CHECK-NEXT:    lsrs r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr32_d3_load_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    ldr r0, [r0]
+; V7M-NEXT:    uxtb r1, r1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    rsb.w r1, r2, #32
+; V7M-NEXT:    uxtb r1, r1
+; V7M-NEXT:    lsls r0, r1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr32_d3_load_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    ldr r0, [r0]
@@ -3389,6 +4137,43 @@ define i64 @bextr64_d0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
 ; CHECK-NEXT:    movpl r1, #0
 ; CHECK-NEXT:    pop {r4, pc}
 ;
+; V7M-LABEL: bextr64_d0:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    .save {r4, lr}
+; V7M-NEXT:    push {r4, lr}
+; V7M-NEXT:    rsb.w r3, r2, #32
+; V7M-NEXT:    ldr.w r12, [sp, #8]
+; V7M-NEXT:    lsrs r0, r2
+; V7M-NEXT:    lsl.w r3, r1, r3
+; V7M-NEXT:    orrs r0, r3
+; V7M-NEXT:    subs.w r3, r2, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r1, r3
+; V7M-NEXT:    lsr.w r1, r1, r2
+; V7M-NEXT:    rsb.w r3, r12, #64
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    rsb.w lr, r12, #32
+; V7M-NEXT:    rsb.w r12, r3, #32
+; V7M-NEXT:    lsls r1, r3
+; V7M-NEXT:    cmp.w lr, #0
+; V7M-NEXT:    lsr.w r4, r0, r12
+; V7M-NEXT:    orr.w r1, r1, r4
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r1, r0, lr
+; V7M-NEXT:    lsl.w r0, r0, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r0, #0
+; V7M-NEXT:    lsl.w r2, r1, r12
+; V7M-NEXT:    lsr.w r0, r0, r3
+; V7M-NEXT:    orr.w r0, r0, r2
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r1, lr
+; V7M-NEXT:    lsr.w r1, r1, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    pop {r4, pc}
+;
 ; V7A-LABEL: bextr64_d0:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r11, lr}
@@ -3511,6 +4296,44 @@ define i64 @bextr64_d1_indexzext(i64 %val, i8 %numskipbits, i8 %numlowbits) noun
 ; CHECK-NEXT:    movpl r1, #0
 ; CHECK-NEXT:    pop {r4, pc}
 ;
+; V7M-LABEL: bextr64_d1_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    .save {r4, lr}
+; V7M-NEXT:    push {r4, lr}
+; V7M-NEXT:    uxtb.w lr, r2
+; V7M-NEXT:    subs.w r2, lr, #32
+; V7M-NEXT:    lsr.w r12, r0, lr
+; V7M-NEXT:    rsb.w r0, lr, #32
+; V7M-NEXT:    lsl.w r0, r1, r0
+; V7M-NEXT:    orr.w r0, r0, r12
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r1, r2
+; V7M-NEXT:    rsb.w r2, r3, #64
+; V7M-NEXT:    lsr.w r1, r1, lr
+; V7M-NEXT:    uxtb r2, r2
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    rsb.w r12, r2, #32
+; V7M-NEXT:    lsls r1, r2
+; V7M-NEXT:    sub.w r3, r2, #32
+; V7M-NEXT:    lsr.w r4, r0, r12
+; V7M-NEXT:    orrs r1, r4
+; V7M-NEXT:    cmp r3, #0
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r1, r0, r3
+; V7M-NEXT:    lsl.w r0, r0, r2
+; V7M-NEXT:    lsl.w r4, r1, r12
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r0, #0
+; V7M-NEXT:    lsr.w r0, r0, r2
+; V7M-NEXT:    orr.w r0, r0, r4
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r1, r3
+; V7M-NEXT:    lsr.w r1, r1, r2
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    pop {r4, pc}
+;
 ; V7A-LABEL: bextr64_d1_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r4, r5, r11, lr}
@@ -3645,6 +4468,44 @@ define i64 @bextr64_d2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind
 ; CHECK-NEXT:    movpl r1, #0
 ; CHECK-NEXT:    pop {r4, pc}
 ;
+; V7M-LABEL: bextr64_d2_load:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    .save {r4, lr}
+; V7M-NEXT:    push {r4, lr}
+; V7M-NEXT:    ldrd r0, r3, [r0]
+; V7M-NEXT:    rsb.w r1, r2, #32
+; V7M-NEXT:    ldr.w r12, [sp, #8]
+; V7M-NEXT:    lsl.w r1, r3, r1
+; V7M-NEXT:    lsrs r0, r2
+; V7M-NEXT:    rsb.w lr, r12, #32
+; V7M-NEXT:    orrs r0, r1
+; V7M-NEXT:    subs.w r1, r2, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r3, r1
+; V7M-NEXT:    rsb.w r1, r12, #64
+; V7M-NEXT:    lsr.w r2, r3, r2
+; V7M-NEXT:    rsb.w r12, r1, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r2, #0
+; V7M-NEXT:    cmp.w lr, #0
+; V7M-NEXT:    lsl.w r2, r2, r1
+; V7M-NEXT:    lsr.w r4, r0, r12
+; V7M-NEXT:    orr.w r2, r2, r4
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r2, r0, lr
+; V7M-NEXT:    lsl.w r0, r0, r1
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r0, #0
+; V7M-NEXT:    lsl.w r3, r2, r12
+; V7M-NEXT:    lsr.w r0, r0, r1
+; V7M-NEXT:    lsr.w r1, r2, r1
+; V7M-NEXT:    orr.w r0, r0, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r2, lr
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    pop {r4, pc}
+;
 ; V7A-LABEL: bextr64_d2_load:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r11, lr}
@@ -3774,6 +4635,45 @@ define i64 @bextr64_d3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) n
 ; CHECK-NEXT:    movpl r1, #0
 ; CHECK-NEXT:    pop {r4, pc}
 ;
+; V7M-LABEL: bextr64_d3_load_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    .save {r4, lr}
+; V7M-NEXT:    push {r4, lr}
+; V7M-NEXT:    ldrd r0, lr, [r0]
+; V7M-NEXT:    uxtb r1, r1
+; V7M-NEXT:    rsb.w r2, r2, #64
+; V7M-NEXT:    subs.w r3, r1, #32
+; V7M-NEXT:    lsr.w r12, r0, r1
+; V7M-NEXT:    rsb.w r0, r1, #32
+; V7M-NEXT:    lsr.w r1, lr, r1
+; V7M-NEXT:    uxtb r2, r2
+; V7M-NEXT:    lsl.w r0, lr, r0
+; V7M-NEXT:    orr.w r0, r0, r12
+; V7M-NEXT:    rsb.w r12, r2, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, lr, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    lsls r1, r2
+; V7M-NEXT:    sub.w r3, r2, #32
+; V7M-NEXT:    lsr.w r4, r0, r12
+; V7M-NEXT:    orrs r1, r4
+; V7M-NEXT:    cmp r3, #0
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r1, r0, r3
+; V7M-NEXT:    lsl.w r0, r0, r2
+; V7M-NEXT:    lsl.w r4, r1, r12
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r0, #0
+; V7M-NEXT:    lsr.w r0, r0, r2
+; V7M-NEXT:    orr.w r0, r0, r4
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r1, r3
+; V7M-NEXT:    lsr.w r1, r1, r2
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    pop {r4, pc}
+;
 ; V7A-LABEL: bextr64_d3_load_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r4, r5, r11, lr}
@@ -3915,6 +4815,40 @@ define i32 @bextr64_32_d0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind
 ; CHECK-NEXT:    lsrpl.w r0, r1, lr
 ; CHECK-NEXT:    pop {r4, pc}
 ;
+; V7M-LABEL: bextr64_32_d0:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    .save {r4, lr}
+; V7M-NEXT:    push {r4, lr}
+; V7M-NEXT:    rsb.w r3, r2, #32
+; V7M-NEXT:    ldr.w r12, [sp, #8]
+; V7M-NEXT:    lsrs r0, r2
+; V7M-NEXT:    lsl.w r3, r1, r3
+; V7M-NEXT:    orrs r0, r3
+; V7M-NEXT:    subs.w r3, r2, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r1, r3
+; V7M-NEXT:    lsr.w r1, r1, r2
+; V7M-NEXT:    rsb.w r3, r12, #64
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    rsb.w lr, r12, #32
+; V7M-NEXT:    rsb.w r12, r3, #32
+; V7M-NEXT:    lsls r1, r3
+; V7M-NEXT:    cmp.w lr, #0
+; V7M-NEXT:    lsr.w r4, r0, r12
+; V7M-NEXT:    orr.w r1, r1, r4
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r1, r0, lr
+; V7M-NEXT:    lsl.w r0, r0, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r0, #0
+; V7M-NEXT:    lsl.w r2, r1, r12
+; V7M-NEXT:    lsr.w r0, r0, r3
+; V7M-NEXT:    orr.w r0, r0, r2
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r1, lr
+; V7M-NEXT:    pop {r4, pc}
+;
 ; V7A-LABEL: bextr64_32_d0:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r11, lr}
@@ -4011,6 +4945,21 @@ define i32 @bextr64_32_d1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind
 ; CHECK-NEXT:    lsrs r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bextr64_32_d1:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    rsb.w r3, r2, #32
+; V7M-NEXT:    lsrs r0, r2
+; V7M-NEXT:    subs r2, #32
+; V7M-NEXT:    lsl.w r3, r1, r3
+; V7M-NEXT:    orr.w r0, r0, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r1, r2
+; V7M-NEXT:    ldr r1, [sp]
+; V7M-NEXT:    rsb.w r1, r1, #32
+; V7M-NEXT:    lsls r0, r1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bextr64_32_d1:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    rsb r3, r2, #32
@@ -4073,6 +5022,15 @@ define void @pr38938(ptr %a0, ptr %a1) nounwind {
 ; CHECK-NEXT:    str.w r2, [r0, r1, lsl #2]
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: pr38938:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    ldr r1, [r1]
+; V7M-NEXT:    ubfx r1, r1, #21, #10
+; V7M-NEXT:    ldr.w r2, [r0, r1, lsl #2]
+; V7M-NEXT:    adds r2, #1
+; V7M-NEXT:    str.w r2, [r0, r1, lsl #2]
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: pr38938:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    ldr r1, [r1]
@@ -4122,6 +5080,11 @@ define i32 @c0_i32(i32 %arg) nounwind {
 ; CHECK-NEXT:    ubfx r0, r0, #19, #10
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: c0_i32:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    ubfx r0, r0, #19, #10
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: c0_i32:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    ubfx r0, r0, #19, #10
@@ -4150,6 +5113,12 @@ define i32 @c1_i32(i32 %arg) nounwind {
 ; CHECK-NEXT:    and.w r0, r1, r0, lsr #19
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: c1_i32:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    movw r1, #4092
+; V7M-NEXT:    and.w r0, r1, r0, lsr #19
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: c1_i32:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    movw r1, #4092
@@ -4185,6 +5154,12 @@ define i32 @c2_i32(i32 %arg) nounwind {
 ; CHECK-NEXT:    and.w r0, r1, r0, lsr #17
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: c2_i32:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    movw r1, #4092
+; V7M-NEXT:    and.w r0, r1, r0, lsr #17
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: c2_i32:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    movw r1, #4092
@@ -4221,6 +5196,12 @@ define i32 @c4_i32_bad(i32 %arg) nounwind {
 ; CHECK-NEXT:    and.w r0, r1, r0, lsr #19
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: c4_i32_bad:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    mvn r1, #1
+; V7M-NEXT:    and.w r0, r1, r0, lsr #19
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: c4_i32_bad:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    mvn r1, #1
@@ -4253,6 +5234,12 @@ define i64 @c0_i64(i64 %arg) nounwind {
 ; CHECK-NEXT:    movs r1, #0
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: c0_i64:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    ubfx r0, r1, #19, #10
+; V7M-NEXT:    movs r1, #0
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: c0_i64:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    ubfx r0, r1, #19, #10
@@ -4285,6 +5272,13 @@ define i64 @c1_i64(i64 %arg) nounwind {
 ; CHECK-NEXT:    movs r1, #0
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: c1_i64:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    movw r0, #4092
+; V7M-NEXT:    and.w r0, r0, r1, lsr #19
+; V7M-NEXT:    movs r1, #0
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: c1_i64:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    movw r0, #4092
@@ -4324,6 +5318,13 @@ define i64 @c2_i64(i64 %arg) nounwind {
 ; CHECK-NEXT:    movs r1, #0
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: c2_i64:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    movw r0, #4092
+; V7M-NEXT:    and.w r0, r0, r1, lsr #17
+; V7M-NEXT:    movs r1, #0
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: c2_i64:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    movw r0, #4092
@@ -4364,6 +5365,13 @@ define i64 @c4_i64_bad(i64 %arg) nounwind {
 ; CHECK-NEXT:    movs r1, #0
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: c4_i64_bad:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    mvn r0, #1
+; V7M-NEXT:    and.w r0, r0, r1, lsr #19
+; V7M-NEXT:    movs r1, #0
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: c4_i64_bad:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    mvn r0, #1
@@ -4403,6 +5411,12 @@ define void @c5_i32(i32 %arg, ptr %ptr) nounwind {
 ; CHECK-NEXT:    str r0, [r1]
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: c5_i32:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    ubfx r0, r0, #19, #10
+; V7M-NEXT:    str r0, [r1]
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: c5_i32:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    ubfx r0, r0, #19, #10
@@ -4435,6 +5449,12 @@ define void @c6_i32(i32 %arg, ptr %ptr) nounwind {
 ; CHECK-NEXT:    str r0, [r1]
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: c6_i32:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    ubfx r0, r0, #19, #12
+; V7M-NEXT:    str r0, [r1]
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: c6_i32:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    ubfx r0, r0, #19, #12
@@ -4468,6 +5488,13 @@ define void @c7_i32(i32 %arg, ptr %ptr) nounwind {
 ; CHECK-NEXT:    str r0, [r1]
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: c7_i32:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    movw r2, #4092
+; V7M-NEXT:    and.w r0, r2, r0, lsr #17
+; V7M-NEXT:    str r0, [r1]
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: c7_i32:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    movw r2, #4092
@@ -4511,6 +5538,13 @@ define void @c5_i64(i64 %arg, ptr %ptr) nounwind {
 ; CHECK-NEXT:    strd r1, r0, [r2]
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: c5_i64:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    movs r0, #0
+; V7M-NEXT:    ubfx r1, r1, #19, #10
+; V7M-NEXT:    strd r1, r0, [r2]
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: c5_i64:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    mov r0, #0
@@ -4549,6 +5583,13 @@ define void @c6_i64(i64 %arg, ptr %ptr) nounwind {
 ; CHECK-NEXT:    strd r1, r0, [r2]
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: c6_i64:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    movs r0, #0
+; V7M-NEXT:    ubfx r1, r1, #19, #12
+; V7M-NEXT:    strd r1, r0, [r2]
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: c6_i64:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    mov r0, #0
@@ -4588,6 +5629,14 @@ define void @c7_i64(i64 %arg, ptr %ptr) nounwind {
 ; CHECK-NEXT:    strd r1, r0, [r2]
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: c7_i64:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    movs r0, #0
+; V7M-NEXT:    movw r3, #4092
+; V7M-NEXT:    and.w r1, r3, r1, lsr #17
+; V7M-NEXT:    strd r1, r0, [r2]
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: c7_i64:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    movw r0, #4092
@@ -4623,5 +5672,3 @@ define void @c7_i64(i64 %arg, ptr %ptr) nounwind {
   store i64 %tmp2, ptr %ptr
   ret void
 }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; V7M: {{.*}}
diff --git a/llvm/test/CodeGen/ARM/extract-lowbits.ll b/llvm/test/CodeGen/ARM/extract-lowbits.ll
index 9bc8c638b63b7..96afc6302ea8a 100644
--- a/llvm/test/CodeGen/ARM/extract-lowbits.ll
+++ b/llvm/test/CodeGen/ARM/extract-lowbits.ll
@@ -30,6 +30,14 @@ define i32 @bzhi32_a0(i32 %val, i32 %numlowbits) nounwind {
 ; CHECK-NEXT:    ands r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi32_a0:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    movs r2, #1
+; V7M-NEXT:    lsl.w r1, r2, r1
+; V7M-NEXT:    subs r1, #1
+; V7M-NEXT:    ands r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi32_a0:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    mov r2, #1
@@ -68,6 +76,14 @@ define i32 @bzhi32_a1_indexzext(i32 %val, i8 zeroext %numlowbits) nounwind {
 ; CHECK-NEXT:    ands r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi32_a1_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    movs r2, #1
+; V7M-NEXT:    lsl.w r1, r2, r1
+; V7M-NEXT:    subs r1, #1
+; V7M-NEXT:    ands r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi32_a1_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    mov r2, #1
@@ -108,6 +124,15 @@ define i32 @bzhi32_a2_load(ptr %w, i32 %numlowbits) nounwind {
 ; CHECK-NEXT:    ands r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi32_a2_load:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    movs r2, #1
+; V7M-NEXT:    ldr r0, [r0]
+; V7M-NEXT:    lsl.w r1, r2, r1
+; V7M-NEXT:    subs r1, #1
+; V7M-NEXT:    ands r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi32_a2_load:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    mov r2, #1
@@ -151,6 +176,15 @@ define i32 @bzhi32_a3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind {
 ; CHECK-NEXT:    ands r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi32_a3_load_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    movs r2, #1
+; V7M-NEXT:    ldr r0, [r0]
+; V7M-NEXT:    lsl.w r1, r2, r1
+; V7M-NEXT:    subs r1, #1
+; V7M-NEXT:    ands r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi32_a3_load_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    mov r2, #1
@@ -194,6 +228,14 @@ define i32 @bzhi32_a4_commutative(i32 %val, i32 %numlowbits) nounwind {
 ; CHECK-NEXT:    ands r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi32_a4_commutative:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    movs r2, #1
+; V7M-NEXT:    lsl.w r1, r2, r1
+; V7M-NEXT:    subs r1, #1
+; V7M-NEXT:    ands r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi32_a4_commutative:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    mov r2, #1
@@ -245,6 +287,25 @@ define i64 @bzhi64_a0(i64 %val, i64 %numlowbits) nounwind {
 ; CHECK-NEXT:    ands r1, r3
 ; CHECK-NEXT:    pop {r7, pc}
 ;
+; V7M-LABEL: bzhi64_a0:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    .save {r7, lr}
+; V7M-NEXT:    push {r7, lr}
+; V7M-NEXT:    rsb.w r3, r2, #32
+; V7M-NEXT:    mov.w r12, #1
+; V7M-NEXT:    subs.w lr, r2, #32
+; V7M-NEXT:    lsl.w r2, r12, r2
+; V7M-NEXT:    lsr.w r3, r12, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r3, r12, lr
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r2, #0
+; V7M-NEXT:    subs r2, #1
+; V7M-NEXT:    sbc r3, r3, #0
+; V7M-NEXT:    ands r0, r2
+; V7M-NEXT:    ands r1, r3
+; V7M-NEXT:    pop {r7, pc}
+;
 ; V7A-LABEL: bzhi64_a0:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r11, lr}
@@ -324,6 +385,26 @@ define i64 @bzhi64_a0_masked(i64 %val, i64 %numlowbits) nounwind {
 ; CHECK-NEXT:    ands r1, r3
 ; CHECK-NEXT:    pop {r7, pc}
 ;
+; V7M-LABEL: bzhi64_a0_masked:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    .save {r7, lr}
+; V7M-NEXT:    push {r7, lr}
+; V7M-NEXT:    and r2, r2, #63
+; V7M-NEXT:    mov.w r12, #1
+; V7M-NEXT:    rsb.w r3, r2, #32
+; V7M-NEXT:    subs.w lr, r2, #32
+; V7M-NEXT:    lsl.w r2, r12, r2
+; V7M-NEXT:    lsr.w r3, r12, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r3, r12, lr
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r2, #0
+; V7M-NEXT:    subs r2, #1
+; V7M-NEXT:    sbc r3, r3, #0
+; V7M-NEXT:    ands r0, r2
+; V7M-NEXT:    ands r1, r3
+; V7M-NEXT:    pop {r7, pc}
+;
 ; V7A-LABEL: bzhi64_a0_masked:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r11, lr}
@@ -406,6 +487,25 @@ define i64 @bzhi64_a1_indexzext(i64 %val, i8 zeroext %numlowbits) nounwind {
 ; CHECK-NEXT:    ands r1, r3
 ; CHECK-NEXT:    pop {r7, pc}
 ;
+; V7M-LABEL: bzhi64_a1_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    .save {r7, lr}
+; V7M-NEXT:    push {r7, lr}
+; V7M-NEXT:    rsb.w r3, r2, #32
+; V7M-NEXT:    mov.w r12, #1
+; V7M-NEXT:    subs.w lr, r2, #32
+; V7M-NEXT:    lsl.w r2, r12, r2
+; V7M-NEXT:    lsr.w r3, r12, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r3, r12, lr
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r2, #0
+; V7M-NEXT:    subs r2, #1
+; V7M-NEXT:    sbc r3, r3, #0
+; V7M-NEXT:    ands r0, r2
+; V7M-NEXT:    ands r1, r3
+; V7M-NEXT:    pop {r7, pc}
+;
 ; V7A-LABEL: bzhi64_a1_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r11, lr}
@@ -483,6 +583,24 @@ define i64 @bzhi64_a2_load(ptr %w, i64 %numlowbits) nounwind {
 ; CHECK-NEXT:    ands r0, r2
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi64_a2_load:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    rsb.w r1, r2, #32
+; V7M-NEXT:    movs r3, #1
+; V7M-NEXT:    subs.w r12, r2, #32
+; V7M-NEXT:    lsl.w r2, r3, r2
+; V7M-NEXT:    lsr.w r1, r3, r1
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r1, r3, r12
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r2, #0
+; V7M-NEXT:    subs r2, #1
+; V7M-NEXT:    ldrd r0, r3, [r0]
+; V7M-NEXT:    sbc r1, r1, #0
+; V7M-NEXT:    ands r1, r3
+; V7M-NEXT:    ands r0, r2
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi64_a2_load:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r4, r6, r11, lr}
@@ -563,6 +681,24 @@ define i64 @bzhi64_a3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind {
 ; CHECK-NEXT:    ands r0, r3
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi64_a3_load_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    rsb.w r2, r1, #32
+; V7M-NEXT:    movs r3, #1
+; V7M-NEXT:    subs.w r12, r1, #32
+; V7M-NEXT:    lsl.w r1, r3, r1
+; V7M-NEXT:    lsr.w r2, r3, r2
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r2, r3, r12
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    subs r3, r1, #1
+; V7M-NEXT:    sbc r1, r2, #0
+; V7M-NEXT:    ldrd r0, r2, [r0]
+; V7M-NEXT:    ands r1, r2
+; V7M-NEXT:    ands r0, r3
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi64_a3_load_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r4, r6, r11, lr}
@@ -646,6 +782,25 @@ define i64 @bzhi64_a4_commutative(i64 %val, i64 %numlowbits) nounwind {
 ; CHECK-NEXT:    ands r1, r3
 ; CHECK-NEXT:    pop {r7, pc}
 ;
+; V7M-LABEL: bzhi64_a4_commutative:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    .save {r7, lr}
+; V7M-NEXT:    push {r7, lr}
+; V7M-NEXT:    rsb.w r3, r2, #32
+; V7M-NEXT:    mov.w r12, #1
+; V7M-NEXT:    subs.w lr, r2, #32
+; V7M-NEXT:    lsl.w r2, r12, r2
+; V7M-NEXT:    lsr.w r3, r12, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r3, r12, lr
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r2, #0
+; V7M-NEXT:    subs r2, #1
+; V7M-NEXT:    sbc r3, r3, #0
+; V7M-NEXT:    ands r0, r2
+; V7M-NEXT:    ands r1, r3
+; V7M-NEXT:    pop {r7, pc}
+;
 ; V7A-LABEL: bzhi64_a4_commutative:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r11, lr}
@@ -715,6 +870,13 @@ define i32 @bzhi32_b0(i32 %val, i32 %numlowbits) nounwind {
 ; CHECK-NEXT:    bics r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi32_b0:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    mov.w r2, #-1
+; V7M-NEXT:    lsl.w r1, r2, r1
+; V7M-NEXT:    bics r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi32_b0:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    mvn r2, #0
@@ -749,6 +911,13 @@ define i32 @bzhi32_b1_indexzext(i32 %val, i8 zeroext %numlowbits) nounwind {
 ; CHECK-NEXT:    bics r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi32_b1_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    mov.w r2, #-1
+; V7M-NEXT:    lsl.w r1, r2, r1
+; V7M-NEXT:    bics r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi32_b1_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    mvn r2, #0
@@ -785,6 +954,14 @@ define i32 @bzhi32_b2_load(ptr %w, i32 %numlowbits) nounwind {
 ; CHECK-NEXT:    bics r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi32_b2_load:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    ldr r0, [r0]
+; V7M-NEXT:    mov.w r2, #-1
+; V7M-NEXT:    lsl.w r1, r2, r1
+; V7M-NEXT:    bics r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi32_b2_load:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    ldr r0, [r0]
@@ -824,6 +1001,14 @@ define i32 @bzhi32_b3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind {
 ; CHECK-NEXT:    bics r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi32_b3_load_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    ldr r0, [r0]
+; V7M-NEXT:    mov.w r2, #-1
+; V7M-NEXT:    lsl.w r1, r2, r1
+; V7M-NEXT:    bics r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi32_b3_load_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    ldr r0, [r0]
@@ -863,6 +1048,13 @@ define i32 @bzhi32_b4_commutative(i32 %val, i32 %numlowbits) nounwind {
 ; CHECK-NEXT:    bics r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi32_b4_commutative:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    mov.w r2, #-1
+; V7M-NEXT:    lsl.w r1, r2, r1
+; V7M-NEXT:    bics r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi32_b4_commutative:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    mvn r2, #0
@@ -905,6 +1097,19 @@ define i64 @bzhi64_b0(i64 %val, i64 %numlowbits) nounwind {
 ; CHECK-NEXT:    bics r1, r3
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi64_b0:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    mov.w r3, #-1
+; V7M-NEXT:    lsl.w r12, r3, r2
+; V7M-NEXT:    subs r2, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl.w r12, #0
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl r3, r2
+; V7M-NEXT:    bic.w r0, r0, r12
+; V7M-NEXT:    bics r1, r3
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi64_b0:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    subs r12, r2, #32
@@ -964,6 +1169,19 @@ define i64 @bzhi64_b1_indexzext(i64 %val, i8 zeroext %numlowbits) nounwind {
 ; CHECK-NEXT:    bics r1, r3
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi64_b1_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    mov.w r3, #-1
+; V7M-NEXT:    lsl.w r12, r3, r2
+; V7M-NEXT:    subs r2, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl.w r12, #0
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl r3, r2
+; V7M-NEXT:    bic.w r0, r0, r12
+; V7M-NEXT:    bics r1, r3
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi64_b1_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    subs r12, r2, #32
@@ -1025,6 +1243,20 @@ define i64 @bzhi64_b2_load(ptr %w, i64 %numlowbits) nounwind {
 ; CHECK-NEXT:    bic.w r1, r2, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi64_b2_load:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    mov.w r1, #-1
+; V7M-NEXT:    subs.w r12, r2, #32
+; V7M-NEXT:    lsl.w r3, r1, r2
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r3, #0
+; V7M-NEXT:    ldrd r0, r2, [r0]
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r1, r1, r12
+; V7M-NEXT:    bics r0, r3
+; V7M-NEXT:    bic.w r1, r2, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi64_b2_load:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r4, lr}
@@ -1091,6 +1323,20 @@ define i64 @bzhi64_b3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind {
 ; CHECK-NEXT:    bics r0, r3
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi64_b3_load_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    mov.w r2, #-1
+; V7M-NEXT:    subs.w r12, r1, #32
+; V7M-NEXT:    lsl.w r3, r2, r1
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r3, #0
+; V7M-NEXT:    ldrd r0, r1, [r0]
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r2, r2, r12
+; V7M-NEXT:    bics r1, r2
+; V7M-NEXT:    bics r0, r3
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi64_b3_load_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r4, r6, r11, lr}
@@ -1158,6 +1404,19 @@ define i64 @bzhi64_b4_commutative(i64 %val, i64 %numlowbits) nounwind {
 ; CHECK-NEXT:    bics r1, r3
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi64_b4_commutative:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    mov.w r3, #-1
+; V7M-NEXT:    lsl.w r12, r3, r2
+; V7M-NEXT:    subs r2, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl.w r12, #0
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl r3, r2
+; V7M-NEXT:    bic.w r0, r0, r12
+; V7M-NEXT:    bics r1, r3
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi64_b4_commutative:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    subs r12, r2, #32
@@ -1216,29 +1475,33 @@ define i32 @bzhi32_c0(i32 %val, i32 %numlowbits) nounwind {
 ; CHECK-NEXT:    ands r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi32_c0:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    rsb.w r1, r1, #32
+; V7M-NEXT:    lsls r0, r1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi32_c0:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    rsb r1, r1, #32
-; V7A-NEXT:    mvn r2, #0
-; V7A-NEXT:    and r0, r0, r2, lsr r1
+; V7A-NEXT:    lsl r0, r0, r1
+; V7A-NEXT:    lsr r0, r0, r1
 ; V7A-NEXT:    bx lr
 ;
 ; V7A-T-LABEL: bzhi32_c0:
 ; V7A-T:       @ %bb.0:
 ; V7A-T-NEXT:    rsb.w r1, r1, #32
-; V7A-T-NEXT:    mov.w r2, #-1
-; V7A-T-NEXT:    lsr.w r1, r2, r1
-; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    lsls r0, r1
+; V7A-T-NEXT:    lsrs r0, r1
 ; V7A-T-NEXT:    bx lr
 ;
 ; V6M-LABEL: bzhi32_c0:
 ; V6M:       @ %bb.0:
 ; V6M-NEXT:    movs r2, #32
 ; V6M-NEXT:    subs r1, r2, r1
-; V6M-NEXT:    movs r2, #0
-; V6M-NEXT:    mvns r2, r2
-; V6M-NEXT:    lsrs r2, r1
-; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    lsls r0, r1
+; V6M-NEXT:    lsrs r0, r1
 ; V6M-NEXT:    bx lr
   %numhighbits = sub i32 32, %numlowbits
   %mask = lshr i32 -1, %numhighbits
@@ -1256,21 +1519,28 @@ define i32 @bzhi32_c1_indexzext(i32 %val, i8 %numlowbits) nounwind {
 ; CHECK-NEXT:    ands r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi32_c1_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    rsb.w r1, r1, #32
+; V7M-NEXT:    uxtb r1, r1
+; V7M-NEXT:    lsls r0, r1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi32_c1_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    rsb r1, r1, #32
-; V7A-NEXT:    mvn r2, #0
 ; V7A-NEXT:    uxtb r1, r1
-; V7A-NEXT:    and r0, r0, r2, lsr r1
+; V7A-NEXT:    lsl r0, r0, r1
+; V7A-NEXT:    lsr r0, r0, r1
 ; V7A-NEXT:    bx lr
 ;
 ; V7A-T-LABEL: bzhi32_c1_indexzext:
 ; V7A-T:       @ %bb.0:
 ; V7A-T-NEXT:    rsb.w r1, r1, #32
-; V7A-T-NEXT:    mov.w r2, #-1
 ; V7A-T-NEXT:    uxtb r1, r1
-; V7A-T-NEXT:    lsr.w r1, r2, r1
-; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    lsls r0, r1
+; V7A-T-NEXT:    lsrs r0, r1
 ; V7A-T-NEXT:    bx lr
 ;
 ; V6M-LABEL: bzhi32_c1_indexzext:
@@ -1278,10 +1548,8 @@ define i32 @bzhi32_c1_indexzext(i32 %val, i8 %numlowbits) nounwind {
 ; V6M-NEXT:    movs r2, #32
 ; V6M-NEXT:    subs r1, r2, r1
 ; V6M-NEXT:    uxtb r1, r1
-; V6M-NEXT:    movs r2, #0
-; V6M-NEXT:    mvns r2, r2
-; V6M-NEXT:    lsrs r2, r1
-; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    lsls r0, r1
+; V6M-NEXT:    lsrs r0, r1
 ; V6M-NEXT:    bx lr
   %numhighbits = sub i8 32, %numlowbits
   %sh_prom = zext i8 %numhighbits to i32
@@ -1300,32 +1568,37 @@ define i32 @bzhi32_c2_load(ptr %w, i32 %numlowbits) nounwind {
 ; CHECK-NEXT:    ands r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi32_c2_load:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    ldr r0, [r0]
+; V7M-NEXT:    rsb.w r1, r1, #32
+; V7M-NEXT:    lsls r0, r1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi32_c2_load:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    ldr r0, [r0]
 ; V7A-NEXT:    rsb r1, r1, #32
-; V7A-NEXT:    mvn r2, #0
-; V7A-NEXT:    and r0, r0, r2, lsr r1
+; V7A-NEXT:    lsl r0, r0, r1
+; V7A-NEXT:    lsr r0, r0, r1
 ; V7A-NEXT:    bx lr
 ;
 ; V7A-T-LABEL: bzhi32_c2_load:
 ; V7A-T:       @ %bb.0:
 ; V7A-T-NEXT:    ldr r0, [r0]
 ; V7A-T-NEXT:    rsb.w r1, r1, #32
-; V7A-T-NEXT:    mov.w r2, #-1
-; V7A-T-NEXT:    lsr.w r1, r2, r1
-; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    lsls r0, r1
+; V7A-T-NEXT:    lsrs r0, r1
 ; V7A-T-NEXT:    bx lr
 ;
 ; V6M-LABEL: bzhi32_c2_load:
 ; V6M:       @ %bb.0:
 ; V6M-NEXT:    movs r2, #32
 ; V6M-NEXT:    subs r1, r2, r1
-; V6M-NEXT:    movs r2, #0
-; V6M-NEXT:    mvns r2, r2
-; V6M-NEXT:    lsrs r2, r1
 ; V6M-NEXT:    ldr r0, [r0]
-; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    lsls r0, r1
+; V6M-NEXT:    lsrs r0, r1
 ; V6M-NEXT:    bx lr
   %val = load i32, ptr %w
   %numhighbits = sub i32 32, %numlowbits
@@ -1345,23 +1618,31 @@ define i32 @bzhi32_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind {
 ; CHECK-NEXT:    ands r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi32_c3_load_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    rsb.w r1, r1, #32
+; V7M-NEXT:    ldr r0, [r0]
+; V7M-NEXT:    uxtb r1, r1
+; V7M-NEXT:    lsls r0, r1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi32_c3_load_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    rsb r1, r1, #32
 ; V7A-NEXT:    ldr r0, [r0]
-; V7A-NEXT:    mvn r2, #0
 ; V7A-NEXT:    uxtb r1, r1
-; V7A-NEXT:    and r0, r0, r2, lsr r1
+; V7A-NEXT:    lsl r0, r0, r1
+; V7A-NEXT:    lsr r0, r0, r1
 ; V7A-NEXT:    bx lr
 ;
 ; V7A-T-LABEL: bzhi32_c3_load_indexzext:
 ; V7A-T:       @ %bb.0:
 ; V7A-T-NEXT:    rsb.w r1, r1, #32
 ; V7A-T-NEXT:    ldr r0, [r0]
-; V7A-T-NEXT:    mov.w r2, #-1
 ; V7A-T-NEXT:    uxtb r1, r1
-; V7A-T-NEXT:    lsr.w r1, r2, r1
-; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    lsls r0, r1
+; V7A-T-NEXT:    lsrs r0, r1
 ; V7A-T-NEXT:    bx lr
 ;
 ; V6M-LABEL: bzhi32_c3_load_indexzext:
@@ -1369,11 +1650,9 @@ define i32 @bzhi32_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind {
 ; V6M-NEXT:    movs r2, #32
 ; V6M-NEXT:    subs r1, r2, r1
 ; V6M-NEXT:    uxtb r1, r1
-; V6M-NEXT:    movs r2, #0
-; V6M-NEXT:    mvns r2, r2
-; V6M-NEXT:    lsrs r2, r1
 ; V6M-NEXT:    ldr r0, [r0]
-; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    lsls r0, r1
+; V6M-NEXT:    lsrs r0, r1
 ; V6M-NEXT:    bx lr
   %val = load i32, ptr %w
   %numhighbits = sub i8 32, %numlowbits
@@ -1392,29 +1671,33 @@ define i32 @bzhi32_c4_commutative(i32 %val, i32 %numlowbits) nounwind {
 ; CHECK-NEXT:    ands r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi32_c4_commutative:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    rsb.w r1, r1, #32
+; V7M-NEXT:    lsls r0, r1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi32_c4_commutative:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    rsb r1, r1, #32
-; V7A-NEXT:    mvn r2, #0
-; V7A-NEXT:    and r0, r0, r2, lsr r1
+; V7A-NEXT:    lsl r0, r0, r1
+; V7A-NEXT:    lsr r0, r0, r1
 ; V7A-NEXT:    bx lr
 ;
 ; V7A-T-LABEL: bzhi32_c4_commutative:
 ; V7A-T:       @ %bb.0:
 ; V7A-T-NEXT:    rsb.w r1, r1, #32
-; V7A-T-NEXT:    mov.w r2, #-1
-; V7A-T-NEXT:    lsr.w r1, r2, r1
-; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    lsls r0, r1
+; V7A-T-NEXT:    lsrs r0, r1
 ; V7A-T-NEXT:    bx lr
 ;
 ; V6M-LABEL: bzhi32_c4_commutative:
 ; V6M:       @ %bb.0:
 ; V6M-NEXT:    movs r2, #32
 ; V6M-NEXT:    subs r1, r2, r1
-; V6M-NEXT:    movs r2, #0
-; V6M-NEXT:    mvns r2, r2
-; V6M-NEXT:    lsrs r2, r1
-; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    lsls r0, r1
+; V6M-NEXT:    lsrs r0, r1
 ; V6M-NEXT:    bx lr
   %numhighbits = sub i32 32, %numlowbits
   %mask = lshr i32 -1, %numhighbits
@@ -1442,6 +1725,23 @@ define i64 @bzhi64_c0(i64 %val, i64 %numlowbits) nounwind {
 ; CHECK-NEXT:    ands r1, r2
 ; CHECK-NEXT:    pop {r7, pc}
 ;
+; V7M-LABEL: bzhi64_c0:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    .save {r7, lr}
+; V7M-NEXT:    push {r7, lr}
+; V7M-NEXT:    rsbs.w lr, r2, #32
+; V7M-NEXT:    rsb.w r2, r2, #64
+; V7M-NEXT:    mov.w r12, #-1
+; V7M-NEXT:    mov.w r3, #-1
+; V7M-NEXT:    lsr.w r2, r12, r2
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r3, r3, lr
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r2, #0
+; V7M-NEXT:    ands r0, r3
+; V7M-NEXT:    ands r1, r2
+; V7M-NEXT:    pop {r7, pc}
+;
 ; V7A-LABEL: bzhi64_c0:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r11, lr}
@@ -1511,6 +1811,21 @@ define i64 @bzhi64_c1_indexzext(i64 %val, i8 %numlowbits) nounwind {
 ; CHECK-NEXT:    ands r1, r2
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi64_c1_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    rsb.w r2, r2, #64
+; V7M-NEXT:    mov.w r3, #-1
+; V7M-NEXT:    uxtb r2, r2
+; V7M-NEXT:    subs.w r12, r2, #32
+; V7M-NEXT:    lsr.w r2, r3, r2
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r3, r3, r12
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r2, #0
+; V7M-NEXT:    ands r0, r3
+; V7M-NEXT:    ands r1, r2
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi64_c1_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r11, lr}
@@ -1587,6 +1902,22 @@ define i64 @bzhi64_c2_load(ptr %w, i64 %numlowbits) nounwind {
 ; CHECK-NEXT:    ands r1, r2
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi64_c2_load:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    rsbs.w r1, r2, #32
+; V7M-NEXT:    mov.w r3, #-1
+; V7M-NEXT:    rsb.w r2, r2, #64
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl r3, r1
+; V7M-NEXT:    ldrd r0, r1, [r0]
+; V7M-NEXT:    mov.w r12, #-1
+; V7M-NEXT:    lsr.w r2, r12, r2
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r2, #0
+; V7M-NEXT:    ands r0, r3
+; V7M-NEXT:    ands r1, r2
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi64_c2_load:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r5, lr}
@@ -1660,6 +1991,22 @@ define i64 @bzhi64_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind {
 ; CHECK-NEXT:    ands r0, r3
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi64_c3_load_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    rsb.w r1, r1, #64
+; V7M-NEXT:    mov.w r3, #-1
+; V7M-NEXT:    uxtb r1, r1
+; V7M-NEXT:    subs.w r2, r1, #32
+; V7M-NEXT:    lsr.w r1, r3, r1
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl r3, r2
+; V7M-NEXT:    ldrd r0, r2, [r0]
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    ands r1, r2
+; V7M-NEXT:    ands r0, r3
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi64_c3_load_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r4, r6, r11, lr}
@@ -1741,6 +2088,23 @@ define i64 @bzhi64_c4_commutative(i64 %val, i64 %numlowbits) nounwind {
 ; CHECK-NEXT:    ands r1, r2
 ; CHECK-NEXT:    pop {r7, pc}
 ;
+; V7M-LABEL: bzhi64_c4_commutative:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    .save {r7, lr}
+; V7M-NEXT:    push {r7, lr}
+; V7M-NEXT:    rsbs.w lr, r2, #32
+; V7M-NEXT:    rsb.w r2, r2, #64
+; V7M-NEXT:    mov.w r12, #-1
+; V7M-NEXT:    mov.w r3, #-1
+; V7M-NEXT:    lsr.w r2, r12, r2
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r3, r3, lr
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r2, #0
+; V7M-NEXT:    ands r0, r3
+; V7M-NEXT:    ands r1, r2
+; V7M-NEXT:    pop {r7, pc}
+;
 ; V7A-LABEL: bzhi64_c4_commutative:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r11, lr}
@@ -1806,6 +2170,13 @@ define i32 @bzhi32_d0(i32 %val, i32 %numlowbits) nounwind {
 ; CHECK-NEXT:    lsrs r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi32_d0:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    rsb.w r1, r1, #32
+; V7M-NEXT:    lsls r0, r1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi32_d0:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    rsb r1, r1, #32
@@ -1842,6 +2213,14 @@ define i32 @bzhi32_d1_indexzext(i32 %val, i8 %numlowbits) nounwind {
 ; CHECK-NEXT:    lsrs r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi32_d1_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    rsb.w r1, r1, #32
+; V7M-NEXT:    uxtb r1, r1
+; V7M-NEXT:    lsls r0, r1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi32_d1_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    rsb r1, r1, #32
@@ -1882,6 +2261,14 @@ define i32 @bzhi32_d2_load(ptr %w, i32 %numlowbits) nounwind {
 ; CHECK-NEXT:    lsrs r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi32_d2_load:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    ldr r0, [r0]
+; V7M-NEXT:    rsb.w r1, r1, #32
+; V7M-NEXT:    lsls r0, r1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi32_d2_load:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    ldr r0, [r0]
@@ -1923,6 +2310,15 @@ define i32 @bzhi32_d3_load_indexzext(ptr %w, i8 %numlowbits) nounwind {
 ; CHECK-NEXT:    lsrs r0, r1
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi32_d3_load_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    rsb.w r1, r1, #32
+; V7M-NEXT:    ldr r0, [r0]
+; V7M-NEXT:    uxtb r1, r1
+; V7M-NEXT:    lsls r0, r1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi32_d3_load_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    rsb r1, r1, #32
@@ -1986,6 +2382,31 @@ define i64 @bzhi64_d0(i64 %val, i64 %numlowbits) nounwind {
 ; CHECK-NEXT:    movpl r1, #0
 ; CHECK-NEXT:    pop {r7, pc}
 ;
+; V7M-LABEL: bzhi64_d0:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    .save {r7, lr}
+; V7M-NEXT:    push {r7, lr}
+; V7M-NEXT:    rsb.w r3, r2, #64
+; V7M-NEXT:    rsbs.w r2, r2, #32
+; V7M-NEXT:    rsb.w lr, r3, #32
+; V7M-NEXT:    lsl.w r12, r1, r3
+; V7M-NEXT:    lsr.w r1, r0, lr
+; V7M-NEXT:    orr.w r1, r1, r12
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r1, r0, r2
+; V7M-NEXT:    lsl.w r0, r0, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r0, #0
+; V7M-NEXT:    lsl.w r12, r1, lr
+; V7M-NEXT:    lsr.w r0, r0, r3
+; V7M-NEXT:    orr.w r0, r0, r12
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r1, r2
+; V7M-NEXT:    lsr.w r1, r1, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    pop {r7, pc}
+;
 ; V7A-LABEL: bzhi64_d0:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r11, lr}
@@ -2072,6 +2493,30 @@ define i64 @bzhi64_d1_indexzext(i64 %val, i8 %numlowbits) nounwind {
 ; CHECK-NEXT:    movpl r1, #0
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi64_d1_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    rsb.w r2, r2, #64
+; V7M-NEXT:    uxtb r2, r2
+; V7M-NEXT:    rsb.w r3, r2, #32
+; V7M-NEXT:    lsl.w r12, r1, r2
+; V7M-NEXT:    lsr.w r1, r0, r3
+; V7M-NEXT:    orr.w r1, r1, r12
+; V7M-NEXT:    subs.w r12, r2, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r1, r0, r12
+; V7M-NEXT:    lsl.w r0, r0, r2
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r0, #0
+; V7M-NEXT:    lsl.w r3, r1, r3
+; V7M-NEXT:    lsr.w r0, r0, r2
+; V7M-NEXT:    orr.w r0, r0, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r1, r12
+; V7M-NEXT:    lsr.w r1, r1, r2
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi64_d1_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r11, lr}
@@ -2168,6 +2613,32 @@ define i64 @bzhi64_d2_load(ptr %w, i64 %numlowbits) nounwind {
 ; CHECK-NEXT:    movpl r1, #0
 ; CHECK-NEXT:    pop {r7, pc}
 ;
+; V7M-LABEL: bzhi64_d2_load:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    .save {r7, lr}
+; V7M-NEXT:    push {r7, lr}
+; V7M-NEXT:    rsb.w r1, r2, #64
+; V7M-NEXT:    ldrd r0, r3, [r0]
+; V7M-NEXT:    rsb.w lr, r1, #32
+; V7M-NEXT:    rsbs.w r2, r2, #32
+; V7M-NEXT:    lsl.w r12, r3, r1
+; V7M-NEXT:    lsr.w r3, r0, lr
+; V7M-NEXT:    orr.w r3, r3, r12
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r3, r0, r2
+; V7M-NEXT:    lsl.w r0, r0, r1
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r0, #0
+; V7M-NEXT:    lsl.w r12, r3, lr
+; V7M-NEXT:    lsr.w r0, r0, r1
+; V7M-NEXT:    lsr.w r1, r3, r1
+; V7M-NEXT:    orr.w r0, r0, r12
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r3, r2
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    pop {r7, pc}
+;
 ; V7A-LABEL: bzhi64_d2_load:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r5, r7, r11, lr}
@@ -2261,6 +2732,31 @@ define i64 @bzhi64_d3_load_indexzext(ptr %w, i8 %numlowbits) nounwind {
 ; CHECK-NEXT:    movpl r1, #0
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi64_d3_load_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    rsb.w r1, r1, #64
+; V7M-NEXT:    ldrd r0, r2, [r0]
+; V7M-NEXT:    uxtb r1, r1
+; V7M-NEXT:    rsb.w r3, r1, #32
+; V7M-NEXT:    lsl.w r12, r2, r1
+; V7M-NEXT:    lsr.w r2, r0, r3
+; V7M-NEXT:    orr.w r2, r2, r12
+; V7M-NEXT:    subs.w r12, r1, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r2, r0, r12
+; V7M-NEXT:    lsl.w r0, r0, r1
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r0, #0
+; V7M-NEXT:    lsl.w r3, r2, r3
+; V7M-NEXT:    lsr.w r0, r0, r1
+; V7M-NEXT:    lsr.w r1, r2, r1
+; V7M-NEXT:    orr.w r0, r0, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r2, r12
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi64_d3_load_indexzext:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    .save {r5, r7, r11, lr}
@@ -2348,6 +2844,11 @@ define i32 @bzhi32_constant_mask32(i32 %val) nounwind {
 ; CHECK-NEXT:    bic r0, r0, #-2147483648
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi32_constant_mask32:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    bic r0, r0, #-2147483648
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi32_constant_mask32:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    bic r0, r0, #-2147483648
@@ -2375,6 +2876,12 @@ define i32 @bzhi32_constant_mask32_load(ptr %val) nounwind {
 ; CHECK-NEXT:    bic r0, r0, #-2147483648
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi32_constant_mask32_load:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    ldr r0, [r0]
+; V7M-NEXT:    bic r0, r0, #-2147483648
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi32_constant_mask32_load:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    ldr r0, [r0]
@@ -2405,6 +2912,11 @@ define i32 @bzhi32_constant_mask16(i32 %val) nounwind {
 ; CHECK-NEXT:    bfc r0, #15, #17
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi32_constant_mask16:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    bfc r0, #15, #17
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi32_constant_mask16:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    bfc r0, #15, #17
@@ -2435,6 +2947,12 @@ define i32 @bzhi32_constant_mask16_load(ptr %val) nounwind {
 ; CHECK-NEXT:    bfc r0, #15, #17
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi32_constant_mask16_load:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    ldr r0, [r0]
+; V7M-NEXT:    bfc r0, #15, #17
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi32_constant_mask16_load:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    ldr r0, [r0]
@@ -2468,6 +2986,11 @@ define i32 @bzhi32_constant_mask8(i32 %val) nounwind {
 ; CHECK-NEXT:    and r0, r0, #127
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi32_constant_mask8:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    and r0, r0, #127
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi32_constant_mask8:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    and r0, r0, #127
@@ -2494,6 +3017,12 @@ define i32 @bzhi32_constant_mask8_load(ptr %val) nounwind {
 ; CHECK-NEXT:    and r0, r0, #127
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi32_constant_mask8_load:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    ldr r0, [r0]
+; V7M-NEXT:    and r0, r0, #127
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi32_constant_mask8_load:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    ldr r0, [r0]
@@ -2525,6 +3054,11 @@ define i64 @bzhi64_constant_mask64(i64 %val) nounwind {
 ; CHECK-NEXT:    bic r1, r1, #-1073741824
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi64_constant_mask64:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    bic r1, r1, #-1073741824
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi64_constant_mask64:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    bic r1, r1, #-1073741824
@@ -2552,6 +3086,12 @@ define i64 @bzhi64_constant_mask64_load(ptr %val) nounwind {
 ; CHECK-NEXT:    bic r1, r1, #-1073741824
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi64_constant_mask64_load:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    ldrd r0, r1, [r0]
+; V7M-NEXT:    bic r1, r1, #-1073741824
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi64_constant_mask64_load:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    ldrd r0, r1, [r0]
@@ -2585,6 +3125,12 @@ define i64 @bzhi64_constant_mask32(i64 %val) nounwind {
 ; CHECK-NEXT:    movs r1, #0
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi64_constant_mask32:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    bic r0, r0, #-2147483648
+; V7M-NEXT:    movs r1, #0
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi64_constant_mask32:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    bic r0, r0, #-2147483648
@@ -2616,6 +3162,13 @@ define i64 @bzhi64_constant_mask32_load(ptr %val) nounwind {
 ; CHECK-NEXT:    bic r0, r0, #-2147483648
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi64_constant_mask32_load:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    ldr r0, [r0]
+; V7M-NEXT:    movs r1, #0
+; V7M-NEXT:    bic r0, r0, #-2147483648
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi64_constant_mask32_load:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    ldr r0, [r0]
@@ -2650,6 +3203,12 @@ define i64 @bzhi64_constant_mask16(i64 %val) nounwind {
 ; CHECK-NEXT:    movs r1, #0
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi64_constant_mask16:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    bfc r0, #15, #17
+; V7M-NEXT:    movs r1, #0
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi64_constant_mask16:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    bfc r0, #15, #17
@@ -2684,6 +3243,13 @@ define i64 @bzhi64_constant_mask16_load(ptr %val) nounwind {
 ; CHECK-NEXT:    bfc r0, #15, #17
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi64_constant_mask16_load:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    ldr r0, [r0]
+; V7M-NEXT:    movs r1, #0
+; V7M-NEXT:    bfc r0, #15, #17
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi64_constant_mask16_load:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    ldr r0, [r0]
@@ -2721,6 +3287,12 @@ define i64 @bzhi64_constant_mask8(i64 %val) nounwind {
 ; CHECK-NEXT:    movs r1, #0
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi64_constant_mask8:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    and r0, r0, #127
+; V7M-NEXT:    movs r1, #0
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi64_constant_mask8:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    and r0, r0, #127
@@ -2751,6 +3323,13 @@ define i64 @bzhi64_constant_mask8_load(ptr %val) nounwind {
 ; CHECK-NEXT:    and r0, r0, #127
 ; CHECK-NEXT:    bx lr
 ;
+; V7M-LABEL: bzhi64_constant_mask8_load:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    ldr r0, [r0]
+; V7M-NEXT:    movs r1, #0
+; V7M-NEXT:    and r0, r0, #127
+; V7M-NEXT:    bx lr
+;
 ; V7A-LABEL: bzhi64_constant_mask8_load:
 ; V7A:       @ %bb.0:
 ; V7A-NEXT:    ldr r0, [r0]
@@ -2776,5 +3355,3 @@ define i64 @bzhi64_constant_mask8_load(ptr %val) nounwind {
   %masked = and i64 %val1, 127
   ret i64 %masked
 }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; V7M: {{.*}}