[llvm] 05334de - [ARM] Long shift tests. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Thu Mar 12 12:02:09 PDT 2020


Author: David Green
Date: 2020-03-12T19:01:49Z
New Revision: 05334de67976d16cf38ea99182e369742b40f023

URL: https://github.com/llvm/llvm-project/commit/05334de67976d16cf38ea99182e369742b40f023
DIFF: https://github.com/llvm/llvm-project/commit/05334de67976d16cf38ea99182e369742b40f023.diff

LOG: [ARM] Long shift tests. NFC

Added: 
    llvm/test/CodeGen/Thumb2/fir.ll
    llvm/test/CodeGen/Thumb2/mve-intrinsics/longshift-const.ll
    llvm/test/CodeGen/Thumb2/mve-intrinsics/longshift-demand.ll
    llvm/test/CodeGen/Thumb2/shift_parts.ll

Modified: 
    

Removed: 
    llvm/test/CodeGen/ARM/shift_parts.ll


################################################################################
diff --git a/llvm/test/CodeGen/Thumb2/fir.ll b/llvm/test/CodeGen/Thumb2/fir.ll
new file mode 100644
index 000000000000..d03fb3d6279f
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/fir.ll
@@ -0,0 +1,65 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --verify-machineinstrs -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve %s -o - | FileCheck %s -check-prefix=CHECK --check-prefix=CHECK-MVE
+; RUN: llc --verify-machineinstrs -mtriple=thumbv8.1m.main-none-eabi -mattr=+dsp %s -o - | FileCheck %s -check-prefix=CHECK --check-prefix=CHECK-NOMVE
+
+define void @test1(i32* %p0, i32 *%p1, i32 *%p2, i32 *%pDst) {
+; CHECK-MVE-LABEL: test1:
+; CHECK-MVE:       @ %bb.0: @ %entry
+; CHECK-MVE-NEXT:    ldr r1, [r1]
+; CHECK-MVE-NEXT:    ldr r2, [r2]
+; CHECK-MVE-NEXT:    ldr r0, [r0]
+; CHECK-MVE-NEXT:    smull r2, r1, r2, r1
+; CHECK-MVE-NEXT:    lsrl r2, r1, #31
+; CHECK-MVE-NEXT:    bic r1, r2, #1
+; CHECK-MVE-NEXT:    add r0, r1
+; CHECK-MVE-NEXT:    str r0, [r3]
+; CHECK-MVE-NEXT:    bx lr
+;
+; CHECK-NOMVE-LABEL: test1:
+; CHECK-NOMVE:       @ %bb.0: @ %entry
+; CHECK-NOMVE-NEXT:    ldr r1, [r1]
+; CHECK-NOMVE-NEXT:    ldr r2, [r2]
+; CHECK-NOMVE-NEXT:    ldr r0, [r0]
+; CHECK-NOMVE-NEXT:    smmul r1, r2, r1
+; CHECK-NOMVE-NEXT:    add.w r0, r0, r1, lsl #1
+; CHECK-NOMVE-NEXT:    str r0, [r3]
+; CHECK-NOMVE-NEXT:    bx lr
+entry:
+  %l3 = load i32, i32* %p0, align 4
+  %l4 = load i32, i32* %p1, align 4
+  %conv5.us = sext i32 %l4 to i64
+  %l5 = load i32, i32* %p2, align 4
+  %conv6.us = sext i32 %l5 to i64
+  %mul.us = mul nsw i64 %conv6.us, %conv5.us
+  %l6 = lshr i64 %mul.us, 31
+  %l7 = trunc i64 %l6 to i32
+  %shl.us = and i32 %l7, -2
+  %add.us = add nsw i32 %shl.us, %l3
+  store i32 %add.us, i32* %pDst, align 4
+  ret void
+}
+
+define void @test2(i32* %p0, i32 *%p1, i32 *%p2, i32 *%pDst) {
+; CHECK-LABEL: test2:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    ldr r1, [r1]
+; CHECK-NEXT:    ldr r2, [r2]
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    smmul r1, r2, r1
+; CHECK-NEXT:    add.w r0, r0, r1, lsl #1
+; CHECK-NEXT:    str r0, [r3]
+; CHECK-NEXT:    bx lr
+entry:
+  %l3 = load i32, i32* %p0, align 4
+  %l4 = load i32, i32* %p1, align 4
+  %conv5.us = sext i32 %l4 to i64
+  %l5 = load i32, i32* %p2, align 4
+  %conv6.us = sext i32 %l5 to i64
+  %mul.us = mul nsw i64 %conv6.us, %conv5.us
+  %l6 = lshr i64 %mul.us, 32
+  %shl74.us = shl nuw nsw i64 %l6, 1
+  %shl.us = trunc i64 %shl74.us to i32
+  %add.us = add nsw i32 %l3, %shl.us
+  store i32 %add.us, i32* %pDst, align 4
+  ret void
+}

diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/longshift-const.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/longshift-const.ll
new file mode 100644
index 000000000000..b2bae28dc310
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/longshift-const.ll
@@ -0,0 +1,408 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s
+
+declare {i32, i32} @llvm.arm.mve.asrl(i32, i32, i32)
+declare {i32, i32} @llvm.arm.mve.lsll(i32, i32, i32)
+
+define i64 @asrl_0(i64 %X) {
+; CHECK-LABEL: asrl_0:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 0)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %9 = or i64 %6, %8
+  ret i64 %9
+}
+
+define i64 @asrl_23(i64 %X) {
+; CHECK-LABEL: asrl_23:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #23
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 23)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %9 = or i64 %6, %8
+  ret i64 %9
+}
+
+define i64 @asrl_32(i64 %X) {
+; CHECK-LABEL: asrl_32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #32
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 32)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %9 = or i64 %6, %8
+  ret i64 %9
+}
+
+define i64 @asrl_33(i64 %X) {
+; CHECK-LABEL: asrl_33:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #33
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 33)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %9 = or i64 %6, %8
+  ret i64 %9
+}
+
+define i64 @asrl_63(i64 %X) {
+; CHECK-LABEL: asrl_63:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #63
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 63)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %9 = or i64 %6, %8
+  ret i64 %9
+}
+
+define i64 @asrl_64(i64 %X) {
+; CHECK-LABEL: asrl_64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #64
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 64)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %9 = or i64 %6, %8
+  ret i64 %9
+}
+
+define i64 @asrl_m2(i64 %X) {
+; CHECK-LABEL: asrl_m2:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #1
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 -2)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %9 = or i64 %6, %8
+  ret i64 %9
+}
+
+define i64 @asrl_m32(i64 %X) {
+; CHECK-LABEL: asrl_m32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #31
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 -32)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %9 = or i64 %6, %8
+  ret i64 %9
+}
+
+define i64 @asrl_m33(i64 %X) {
+; CHECK-LABEL: asrl_m33:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #32
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 -33)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %9 = or i64 %6, %8
+  ret i64 %9
+}
+
+define i64 @asrl_m64(i64 %X) {
+; CHECK-LABEL: asrl_m64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #63
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 -64)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %9 = or i64 %6, %8
+  ret i64 %9
+}
+
+
+
+
+define i64 @lsll_0(i64 %X) {
+; CHECK-LABEL: lsll_0:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 0)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %9 = or i64 %6, %8
+  ret i64 %9
+}
+
+define i64 @lsll_23(i64 %X) {
+; CHECK-LABEL: lsll_23:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #23
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 23)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %9 = or i64 %6, %8
+  ret i64 %9
+}
+
+define i64 @lsll_32(i64 %X) {
+; CHECK-LABEL: lsll_32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #32
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 32)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %9 = or i64 %6, %8
+  ret i64 %9
+}
+
+define i64 @lsll_33(i64 %X) {
+; CHECK-LABEL: lsll_33:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #33
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 33)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %9 = or i64 %6, %8
+  ret i64 %9
+}
+
+define i64 @lsll_63(i64 %X) {
+; CHECK-LABEL: lsll_63:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #63
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 63)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %9 = or i64 %6, %8
+  ret i64 %9
+}
+
+define i64 @lsll_64(i64 %X) {
+; CHECK-LABEL: lsll_64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #64
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 64)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %9 = or i64 %6, %8
+  ret i64 %9
+}
+
+define i64 @lsll_m2(i64 %X) {
+; CHECK-LABEL: lsll_m2:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #1
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 -2)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %9 = or i64 %6, %8
+  ret i64 %9
+}
+
+define i64 @lsll_m32(i64 %X) {
+; CHECK-LABEL: lsll_m32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #31
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 -32)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %9 = or i64 %6, %8
+  ret i64 %9
+}
+
+define i64 @lsll_m33(i64 %X) {
+; CHECK-LABEL: lsll_m33:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #32
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 -33)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %9 = or i64 %6, %8
+  ret i64 %9
+}
+
+define i64 @lsll_m64(i64 %X) {
+; CHECK-LABEL: lsll_m64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #63
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 -64)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %9 = or i64 %6, %8
+  ret i64 %9
+}

diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/longshift-demand.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/longshift-demand.ll
new file mode 100644
index 000000000000..ea5cad7a0c26
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/longshift-demand.ll
@@ -0,0 +1,908 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s
+
+declare {i32, i32} @llvm.arm.mve.asrl(i32, i32, i32)
+declare {i32, i32} @llvm.arm.mve.lsll(i32, i32, i32)
+
+define i32 @ashr_demand_bottom3(i64 %X) {
+; CHECK-LABEL: ashr_demand_bottom3:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #3
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 3)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+define i32 @lsll_demand_bottom3(i64 %X) {
+; CHECK-LABEL: lsll_demand_bottom3:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #3
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 3)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+define i32 @ashr_demand_bottomm3(i64 %X) {
+; CHECK-LABEL: ashr_demand_bottomm3:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #2
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 -3)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+define i32 @lsll_demand_bottomm3(i64 %X) {
+; CHECK-LABEL: lsll_demand_bottomm3:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #2
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 -3)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+
+define i32 @ashr_demand_bottom31(i64 %X) {
+; CHECK-LABEL: ashr_demand_bottom31:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #31
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 31)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+define i32 @lsll_demand_bottom31(i64 %X) {
+; CHECK-LABEL: lsll_demand_bottom31:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #31
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 31)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+define i32 @ashr_demand_bottomm31(i64 %X) {
+; CHECK-LABEL: ashr_demand_bottomm31:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #30
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 -31)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+define i32 @lsll_demand_bottomm31(i64 %X) {
+; CHECK-LABEL: lsll_demand_bottomm31:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #30
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 -31)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+
+define i32 @ashr_demand_bottom32(i64 %X) {
+; CHECK-LABEL: ashr_demand_bottom32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #32
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 32)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+define i32 @lsll_demand_bottom32(i64 %X) {
+; CHECK-LABEL: lsll_demand_bottom32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #32
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 32)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+define i32 @ashr_demand_bottomm32(i64 %X) {
+; CHECK-LABEL: ashr_demand_bottomm32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #31
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 -32)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+define i32 @lsll_demand_bottomm32(i64 %X) {
+; CHECK-LABEL: lsll_demand_bottomm32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #31
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 -32)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+
+define i32 @ashr_demand_bottom44(i64 %X) {
+; CHECK-LABEL: ashr_demand_bottom44:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #44
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 44)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+define i32 @lsll_demand_bottom44(i64 %X) {
+; CHECK-LABEL: lsll_demand_bottom44:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #44
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 44)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+define i32 @ashr_demand_bottomm44(i64 %X) {
+; CHECK-LABEL: ashr_demand_bottomm44:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #43
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 -44)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+define i32 @lsll_demand_bottomm44(i64 %X) {
+; CHECK-LABEL: lsll_demand_bottomm44:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #43
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 -44)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+
+
+
+
+
+
+define i32 @ashr_demand_top3(i64 %X) {
+; CHECK-LABEL: ashr_demand_top3:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #3
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 3)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %sm = lshr i64 %shr, 32
+  %t = trunc i64 %sm to i32
+  ret i32 %t
+}
+
+define i32 @lsll_demand_top3(i64 %X) {
+; CHECK-LABEL: lsll_demand_top3:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #3
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 3)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %sm = lshr i64 %shr, 32
+  %t = trunc i64 %sm to i32
+  ret i32 %t
+}
+
+define i32 @ashr_demand_topm3(i64 %X) {
+; CHECK-LABEL: ashr_demand_topm3:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #2
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 -3)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %sm = lshr i64 %shr, 32
+  %t = trunc i64 %sm to i32
+  ret i32 %t
+}
+
+define i32 @lsll_demand_topm3(i64 %X) {
+; CHECK-LABEL: lsll_demand_topm3:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #2
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 -3)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %sm = lshr i64 %shr, 32
+  %t = trunc i64 %sm to i32
+  ret i32 %t
+}
+
+
+define i32 @ashr_demand_top31(i64 %X) {
+; CHECK-LABEL: ashr_demand_top31:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #31
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 31)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %sm = lshr i64 %shr, 32
+  %t = trunc i64 %sm to i32
+  ret i32 %t
+}
+
+define i32 @lsll_demand_top31(i64 %X) {
+; CHECK-LABEL: lsll_demand_top31:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #31
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 31)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %sm = lshr i64 %shr, 32
+  %t = trunc i64 %sm to i32
+  ret i32 %t
+}
+
+define i32 @ashr_demand_topm31(i64 %X) {
+; CHECK-LABEL: ashr_demand_topm31:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #30
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 -31)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %sm = lshr i64 %shr, 32
+  %t = trunc i64 %sm to i32
+  ret i32 %t
+}
+
+define i32 @lsll_demand_topm31(i64 %X) {
+; CHECK-LABEL: lsll_demand_topm31:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #30
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 -31)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %sm = lshr i64 %shr, 32
+  %t = trunc i64 %sm to i32
+  ret i32 %t
+}
+
+
+define i32 @ashr_demand_top32(i64 %X) {
+; CHECK-LABEL: ashr_demand_top32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #32
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 32)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %sm = lshr i64 %shr, 32
+  %t = trunc i64 %sm to i32
+  ret i32 %t
+}
+
+define i32 @lsll_demand_top32(i64 %X) {
+; CHECK-LABEL: lsll_demand_top32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #32
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 32)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %sm = lshr i64 %shr, 32
+  %t = trunc i64 %sm to i32
+  ret i32 %t
+}
+
+define i32 @ashr_demand_topm32(i64 %X) {
+; CHECK-LABEL: ashr_demand_topm32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #31
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 -32)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %sm = lshr i64 %shr, 32
+  %t = trunc i64 %sm to i32
+  ret i32 %t
+}
+
+define i32 @lsll_demand_topm32(i64 %X) {
+; CHECK-LABEL: lsll_demand_topm32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #31
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 -32)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %sm = lshr i64 %shr, 32
+  %t = trunc i64 %sm to i32
+  ret i32 %t
+}
+
+
+define i32 @ashr_demand_top44(i64 %X) {
+; CHECK-LABEL: ashr_demand_top44:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #44
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 44)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %sm = lshr i64 %shr, 32
+  %t = trunc i64 %sm to i32
+  ret i32 %t
+}
+
+define i32 @lsll_demand_top44(i64 %X) {
+; CHECK-LABEL: lsll_demand_top44:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #44
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 44)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %sm = lshr i64 %shr, 32
+  %t = trunc i64 %sm to i32
+  ret i32 %t
+}
+
+define i32 @ashr_demand_topm44(i64 %X) {
+; CHECK-LABEL: ashr_demand_topm44:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #43
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 -44)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %sm = lshr i64 %shr, 32
+  %t = trunc i64 %sm to i32
+  ret i32 %t
+}
+
+define i32 @lsll_demand_topm44(i64 %X) {
+; CHECK-LABEL: lsll_demand_topm44:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #43
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 -44)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %sm = lshr i64 %shr, 32
+  %t = trunc i64 %sm to i32
+  ret i32 %t
+}
+
+
+
+define i32 @ashr_demand_bottommask3(i64 %X) {
+; CHECK-LABEL: ashr_demand_bottommask3:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #3
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    bic r0, r0, #1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 3)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %t = trunc i64 %shr to i32
+  %a = and i32 %t, -2
+  ret i32 %a
+}
+
+define i32 @lsll_demand_bottommask3(i64 %X) {
+; CHECK-LABEL: lsll_demand_bottommask3:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #3
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    bic r0, r0, #1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 3)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %t = trunc i64 %shr to i32
+  %a = and i32 %t, -2
+  ret i32 %a
+}
+
+define i32 @ashr_demand_bottommaskm3(i64 %X) {
+; CHECK-LABEL: ashr_demand_bottommaskm3:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #2
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    bic r0, r0, #1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 -3)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %t = trunc i64 %shr to i32
+  %a = and i32 %t, -2
+  ret i32 %a
+}
+
+define i32 @lsll_demand_bottommaskm3(i64 %X) {
+; CHECK-LABEL: lsll_demand_bottommaskm3:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #2
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    bic r0, r0, #1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 -3)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %t = trunc i64 %shr to i32
+  %a = and i32 %t, -2
+  ret i32 %a
+}
+
+
+define i32 @ashr_demand_bottommask32(i64 %X) {
+; CHECK-LABEL: ashr_demand_bottommask32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #32
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    bic r0, r0, #1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 32)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %t = trunc i64 %shr to i32
+  %a = and i32 %t, -2
+  ret i32 %a
+}
+
+define i32 @lsll_demand_bottommask32(i64 %X) {
+; CHECK-LABEL: lsll_demand_bottommask32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r2, #32
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    bic r0, r0, #1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 32)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %t = trunc i64 %shr to i32
+  %a = and i32 %t, -2
+  ret i32 %a
+}
+
+define i32 @ashr_demand_bottommaskm32(i64 %X) {
+; CHECK-LABEL: ashr_demand_bottommaskm32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #31
+; CHECK-NEXT:    asrl r0, r1, r2
+; CHECK-NEXT:    bic r0, r0, #1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.asrl(i32 %2, i32 %1, i32 -32)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %t = trunc i64 %shr to i32
+  %a = and i32 %t, -2
+  ret i32 %a
+}
+
+define i32 @lsll_demand_bottommaskm32(i64 %X) {
+; CHECK-LABEL: lsll_demand_bottommaskm32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r2, #31
+; CHECK-NEXT:    lsll r0, r1, r2
+; CHECK-NEXT:    bic r0, r0, #1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = lshr i64 %X, 32
+  %1 = trunc i64 %0 to i32
+  %2 = trunc i64 %X to i32
+  %3 = call { i32, i32 } @llvm.arm.mve.lsll(i32 %2, i32 %1, i32 -32)
+  %4 = extractvalue { i32, i32 } %3, 1
+  %5 = zext i32 %4 to i64
+  %6 = shl nuw i64 %5, 32
+  %7 = extractvalue { i32, i32 } %3, 0
+  %8 = zext i32 %7 to i64
+  %shr = or i64 %6, %8
+  %t = trunc i64 %shr to i32
+  %a = and i32 %t, -2
+  ret i32 %a
+}

diff  --git a/llvm/test/CodeGen/ARM/shift_parts.ll b/llvm/test/CodeGen/Thumb2/shift_parts.ll
similarity index 53%
rename from llvm/test/CodeGen/ARM/shift_parts.ll
rename to llvm/test/CodeGen/Thumb2/shift_parts.ll
index bb429edc623e..721c63ddf3ac 100644
--- a/llvm/test/CodeGen/ARM/shift_parts.ll
+++ b/llvm/test/CodeGen/Thumb2/shift_parts.ll
@@ -259,3 +259,275 @@ entry:
   store i192 %bf.clear4, i192* %0, align 8
   ret void
 }
+
+
+define i32 @ashr_demand_bottom3(i64 %x) {
+; CHECK-MVE-LABEL: ashr_demand_bottom3:
+; CHECK-MVE:       @ %bb.0: @ %entry
+; CHECK-MVE-NEXT:    lsrl r0, r1, #3
+; CHECK-MVE-NEXT:    bx lr
+;
+; CHECK-NON-MVE-LABEL: ashr_demand_bottom3:
+; CHECK-NON-MVE:       @ %bb.0: @ %entry
+; CHECK-NON-MVE-NEXT:    lsrs r0, r0, #3
+; CHECK-NON-MVE-NEXT:    orr.w r0, r0, r1, lsl #29
+; CHECK-NON-MVE-NEXT:    bx lr
+entry:
+  %shr = ashr i64 %x, 3
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+define i32 @lshr_demand_bottom3(i64 %x) {
+; CHECK-MVE-LABEL: lshr_demand_bottom3:
+; CHECK-MVE:       @ %bb.0: @ %entry
+; CHECK-MVE-NEXT:    lsrl r0, r1, #3
+; CHECK-MVE-NEXT:    bx lr
+;
+; CHECK-NON-MVE-LABEL: lshr_demand_bottom3:
+; CHECK-NON-MVE:       @ %bb.0: @ %entry
+; CHECK-NON-MVE-NEXT:    lsrs r0, r0, #3
+; CHECK-NON-MVE-NEXT:    orr.w r0, r0, r1, lsl #29
+; CHECK-NON-MVE-NEXT:    bx lr
+entry:
+  %shr = lshr i64 %x, 3
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+define i32 @lsl_demand_bottom3(i64 %x) {
+; CHECK-LABEL: lsl_demand_bottom3:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    lsls r0, r0, #3
+; CHECK-NEXT:    bx lr
+entry:
+  %shr = shl i64 %x, 3
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+
+define i32 @ashr_demand_bottom31(i64 %x) {
+; CHECK-MVE-LABEL: ashr_demand_bottom31:
+; CHECK-MVE:       @ %bb.0: @ %entry
+; CHECK-MVE-NEXT:    lsrl r0, r1, #31
+; CHECK-MVE-NEXT:    bx lr
+;
+; CHECK-NON-MVE-LABEL: ashr_demand_bottom31:
+; CHECK-NON-MVE:       @ %bb.0: @ %entry
+; CHECK-NON-MVE-NEXT:    lsrs r0, r0, #31
+; CHECK-NON-MVE-NEXT:    orr.w r0, r0, r1, lsl #1
+; CHECK-NON-MVE-NEXT:    bx lr
+entry:
+  %shr = ashr i64 %x, 31
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+define i32 @lshr_demand_bottom31(i64 %x) {
+; CHECK-MVE-LABEL: lshr_demand_bottom31:
+; CHECK-MVE:       @ %bb.0: @ %entry
+; CHECK-MVE-NEXT:    lsrl r0, r1, #31
+; CHECK-MVE-NEXT:    bx lr
+;
+; CHECK-NON-MVE-LABEL: lshr_demand_bottom31:
+; CHECK-NON-MVE:       @ %bb.0: @ %entry
+; CHECK-NON-MVE-NEXT:    lsrs r0, r0, #31
+; CHECK-NON-MVE-NEXT:    orr.w r0, r0, r1, lsl #1
+; CHECK-NON-MVE-NEXT:    bx lr
+entry:
+  %shr = lshr i64 %x, 31
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+define i32 @lsl_demand_bottom31(i64 %x) {
+; CHECK-LABEL: lsl_demand_bottom31:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    lsls r0, r0, #31
+; CHECK-NEXT:    bx lr
+entry:
+  %shr = shl i64 %x, 31
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+
+define i32 @ashr_demand_bottom32(i64 %x) {
+; CHECK-LABEL: ashr_demand_bottom32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %shr = ashr i64 %x, 32
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+define i32 @lshr_demand_bottom32(i64 %x) {
+; CHECK-LABEL: lshr_demand_bottom32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %shr = lshr i64 %x, 32
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+define i32 @lsl_demand_bottom32(i64 %x) {
+; CHECK-LABEL: lsl_demand_bottom32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r0, #0
+; CHECK-NEXT:    bx lr
+entry:
+  %shr = shl i64 %x, 32
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+
+define i32 @ashr_demand_bottom44(i64 %x) {
+; CHECK-LABEL: ashr_demand_bottom44:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    asrs r0, r1, #12
+; CHECK-NEXT:    bx lr
+entry:
+  %shr = ashr i64 %x, 44
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+define i32 @lshr_demand_bottom44(i64 %x) {
+; CHECK-LABEL: lshr_demand_bottom44:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    lsrs r0, r1, #12
+; CHECK-NEXT:    bx lr
+entry:
+  %shr = lshr i64 %x, 44
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+define i32 @lsl_demand_bottom44(i64 %x) {
+; CHECK-LABEL: lsl_demand_bottom44:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r0, #0
+; CHECK-NEXT:    bx lr
+entry:
+  %shr = shl i64 %x, 44
+  %t = trunc i64 %shr to i32
+  ret i32 %t
+}
+
+
+define i32 @ashr_demand_bottommask(i64 %x) {
+; CHECK-MVE-LABEL: ashr_demand_bottommask:
+; CHECK-MVE:       @ %bb.0: @ %entry
+; CHECK-MVE-NEXT:    lsrl r0, r1, #31
+; CHECK-MVE-NEXT:    bic r0, r0, #1
+; CHECK-MVE-NEXT:    bx lr
+;
+; CHECK-NON-MVE-LABEL: ashr_demand_bottommask:
+; CHECK-NON-MVE:       @ %bb.0: @ %entry
+; CHECK-NON-MVE-NEXT:    lsls r0, r1, #1
+; CHECK-NON-MVE-NEXT:    bx lr
+entry:
+  %shr = ashr i64 %x, 31
+  %t = trunc i64 %shr to i32
+  %a = and i32 %t, -2
+  ret i32 %a
+}
+
+define i32 @lshr_demand_bottommask(i64 %x) {
+; CHECK-MVE-LABEL: lshr_demand_bottommask:
+; CHECK-MVE:       @ %bb.0: @ %entry
+; CHECK-MVE-NEXT:    lsrl r0, r1, #31
+; CHECK-MVE-NEXT:    bic r0, r0, #1
+; CHECK-MVE-NEXT:    bx lr
+;
+; CHECK-NON-MVE-LABEL: lshr_demand_bottommask:
+; CHECK-NON-MVE:       @ %bb.0: @ %entry
+; CHECK-NON-MVE-NEXT:    lsls r0, r1, #1
+; CHECK-NON-MVE-NEXT:    bx lr
+entry:
+  %shr = lshr i64 %x, 31
+  %t = trunc i64 %shr to i32
+  %a = and i32 %t, -2
+  ret i32 %a
+}
+
+define i32 @lsl_demand_bottommask(i64 %x) {
+; CHECK-LABEL: lsl_demand_bottommask:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    lsls r0, r0, #31
+; CHECK-NEXT:    bx lr
+entry:
+  %shr = shl i64 %x, 31
+  %t = trunc i64 %shr to i32
+  %a = and i32 %t, -2
+  ret i32 %a
+}
+
+define i32 @ashr_demand_bottommask2(i64 %x) {
+; CHECK-MVE-LABEL: ashr_demand_bottommask2:
+; CHECK-MVE:       @ %bb.0: @ %entry
+; CHECK-MVE-NEXT:    lsrl r0, r1, #31
+; CHECK-MVE-NEXT:    bic r0, r0, #3
+; CHECK-MVE-NEXT:    bx lr
+;
+; CHECK-NON-MVE-LABEL: ashr_demand_bottommask2:
+; CHECK-NON-MVE:       @ %bb.0: @ %entry
+; CHECK-NON-MVE-NEXT:    mvn r0, #2
+; CHECK-NON-MVE-NEXT:    and.w r0, r0, r1, lsl #1
+; CHECK-NON-MVE-NEXT:    bx lr
+entry:
+  %shr = ashr i64 %x, 31
+  %t = trunc i64 %shr to i32
+  %a = and i32 %t, -4
+  ret i32 %a
+}
+
+define i32 @lshr_demand_bottommask2(i64 %x) {
+; CHECK-MVE-LABEL: lshr_demand_bottommask2:
+; CHECK-MVE:       @ %bb.0: @ %entry
+; CHECK-MVE-NEXT:    lsrl r0, r1, #31
+; CHECK-MVE-NEXT:    bic r0, r0, #3
+; CHECK-MVE-NEXT:    bx lr
+;
+; CHECK-NON-MVE-LABEL: lshr_demand_bottommask2:
+; CHECK-NON-MVE:       @ %bb.0: @ %entry
+; CHECK-NON-MVE-NEXT:    mvn r0, #2
+; CHECK-NON-MVE-NEXT:    and.w r0, r0, r1, lsl #1
+; CHECK-NON-MVE-NEXT:    bx lr
+entry:
+  %shr = lshr i64 %x, 31
+  %t = trunc i64 %shr to i32
+  %a = and i32 %t, -4
+  ret i32 %a
+}
+
+define i32 @lsl_demand_bottommask2(i64 %x) {
+; CHECK-LABEL: lsl_demand_bottommask2:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    lsls r0, r0, #31
+; CHECK-NEXT:    bx lr
+entry:
+  %shr = shl i64 %x, 31
+  %t = trunc i64 %shr to i32
+  %a = and i32 %t, -4
+  ret i32 %a
+}
+
+define i32 @lsl_demand_topmask(i64 %x) {
+; CHECK-LABEL: lsl_demand_topmask:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    ubfx r0, r0, #1, #28
+; CHECK-NEXT:    bx lr
+entry:
+  %sh = shl i64 %x, 31
+  %a = and i64 %sh, 1152921500311879680 ;0x0fffffff00000000
+  %l = ashr i64 %a, 32
+  %t = trunc i64 %l to i32
+  ret i32 %t
+}


        


More information about the llvm-commits mailing list