[llvm] [X86][ARM][AArch64] shouldFoldMaskToVariableShiftPair should be true for scalars up to the biggest legal type (PR #156886)

via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 4 07:42:26 PDT 2025


https://github.com/AZero13 updated https://github.com/llvm/llvm-project/pull/156886

From 9df495e1fdebc6141f973fc599f2f802f2e9a27d Mon Sep 17 00:00:00 2001
From: AZero13 <gfunni234 at gmail.com>
Date: Thu, 4 Sep 2025 09:32:45 -0400
Subject: [PATCH 1/2] Pre-commit tests (NFC)

---
 .../test/CodeGen/AArch64/and-mask-variable.ll |  30 ++
 llvm/test/CodeGen/ARM/and-mask-variable.ll    |  94 ++++
 llvm/test/CodeGen/X86/and-mask-variable.ll    | 450 ++++++++++++++++++
 3 files changed, 574 insertions(+)
 create mode 100644 llvm/test/CodeGen/AArch64/and-mask-variable.ll
 create mode 100644 llvm/test/CodeGen/ARM/and-mask-variable.ll
 create mode 100644 llvm/test/CodeGen/X86/and-mask-variable.ll

diff --git a/llvm/test/CodeGen/AArch64/and-mask-variable.ll b/llvm/test/CodeGen/AArch64/and-mask-variable.ll
new file mode 100644
index 0000000000000..a92f3cf5ec092
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/and-mask-variable.ll
@@ -0,0 +1,30 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-none-elf -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-none-elf -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+
+define i32 @mask_pair(i32 %x, i32 %y) {
+; CHECK-LABEL: mask_pair:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #-1 // =0xffffffff
+; CHECK-NEXT:    lsl w8, w8, w1
+; CHECK-NEXT:    and w0, w8, w0
+; CHECK-NEXT:    ret
+  %shl = shl nsw i32 -1, %y
+  %and = and i32 %shl, %x
+  ret i32 %and
+}
+
+define i64 @mask_pair_64(i64 %x, i64 %y) {
+; CHECK-LABEL: mask_pair_64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x8, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    lsl x8, x8, x1
+; CHECK-NEXT:    and x0, x8, x0
+; CHECK-NEXT:    ret
+  %shl = shl nsw i64 -1, %y
+  %and = and i64 %shl, %x
+  ret i64 %and
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK-GI: {{.*}}
+; CHECK-SD: {{.*}}
diff --git a/llvm/test/CodeGen/ARM/and-mask-variable.ll b/llvm/test/CodeGen/ARM/and-mask-variable.ll
new file mode 100644
index 0000000000000..0b57fe278bf6e
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/and-mask-variable.ll
@@ -0,0 +1,94 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv7m-eabi  %s -o - | FileCheck %s --check-prefix V7M
+; RUN: llc -mtriple=armv7a-eabi  %s -o -   | FileCheck %s --check-prefix V7A
+; RUN: llc -mtriple=thumbv7a-eabi  %s -o -   | FileCheck %s --check-prefix V7A-T
+; RUN: llc -mtriple=armv6m-eabi  %s -o -   | FileCheck %s --check-prefix V6M
+
+define i32 @mask_pair(i32 %x, i32 %y) {
+; V7M-LABEL: mask_pair:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    mov.w r2, #-1
+; V7M-NEXT:    lsl.w r1, r2, r1
+; V7M-NEXT:    ands r0, r1
+; V7M-NEXT:    bx lr
+;
+; V7A-LABEL: mask_pair:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    mvn r2, #0
+; V7A-NEXT:    and r0, r0, r2, lsl r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: mask_pair:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    mov.w r2, #-1
+; V7A-T-NEXT:    lsl.w r1, r2, r1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: mask_pair:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    movs r2, #0
+; V6M-NEXT:    mvns r2, r2
+; V6M-NEXT:    lsls r2, r1
+; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    bx lr
+  %shl = shl nsw i32 -1, %y
+  %and = and i32 %shl, %x
+  ret i32 %and
+}
+
+define i64 @mask_pair_64(i64 %x, i64 %y) {
+; V7M-LABEL: mask_pair_64:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    mov.w r3, #-1
+; V7M-NEXT:    lsl.w r12, r3, r2
+; V7M-NEXT:    subs r2, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl.w r12, #0
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl r3, r2
+; V7M-NEXT:    and.w r0, r0, r12
+; V7M-NEXT:    ands r1, r3
+; V7M-NEXT:    bx lr
+;
+; V7A-LABEL: mask_pair_64:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    subs r12, r2, #32
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    lsl r2, r3, r2
+; V7A-NEXT:    lslpl r3, r3, r12
+; V7A-NEXT:    movwpl r2, #0
+; V7A-NEXT:    and r1, r3, r1
+; V7A-NEXT:    and r0, r2, r0
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: mask_pair_64:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    mov.w r3, #-1
+; V7A-T-NEXT:    lsl.w r12, r3, r2
+; V7A-T-NEXT:    subs r2, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl.w r12, #0
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl r3, r2
+; V7A-T-NEXT:    and.w r0, r0, r12
+; V7A-T-NEXT:    ands r1, r3
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: mask_pair_64:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r7, lr}
+; V6M-NEXT:    push {r4, r5, r7, lr}
+; V6M-NEXT:    mov r4, r1
+; V6M-NEXT:    mov r5, r0
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    mov r1, r0
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    ands r0, r5
+; V6M-NEXT:    ands r1, r4
+; V6M-NEXT:    pop {r4, r5, r7, pc}
+  %shl = shl nsw i64 -1, %y
+  %and = and i64 %shl, %x
+  ret i64 %and
+}
diff --git a/llvm/test/CodeGen/X86/and-mask-variable.ll b/llvm/test/CodeGen/X86/and-mask-variable.ll
new file mode 100644
index 0000000000000..54daa86dc6f36
--- /dev/null
+++ b/llvm/test/CodeGen/X86/and-mask-variable.ll
@@ -0,0 +1,450 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X86,X86-NOBMI
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,-tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X86,X86-BMINOTBM,X86-BMI1
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,+tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X86,X86-BMITBM,X86-BMI1
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,+tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X86,X86-BMITBM,X86-BMI2
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,-tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X86,X86-BMINOTBM,X86-BMI2
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X64,X64-NOBMI
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,-tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X64,X64-BMINOTBM,X64-BMI1
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,+tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X64,X64-BMITBM,X64-BMI1
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,+tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X64,X64-BMITBM,X64-BMI2
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,-tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X64,X64-BMINOTBM,X64-BMI2
+
+define i32 @mask_pair(i32 %x, i32 %y) {
+; X86-NOBMI-LABEL: mask_pair:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    shrl %cl, %eax
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1-LABEL: mask_pair:
+; X86-BMI1:       # %bb.0:
+; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-BMI1-NEXT:    shrl %cl, %eax
+; X86-BMI1-NEXT:    shll %cl, %eax
+; X86-BMI1-NEXT:    retl
+;
+; X86-BMI2-LABEL: mask_pair:
+; X86-BMI2:       # %bb.0:
+; X86-BMI2-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-BMI2-NEXT:    shrxl %eax, {{[0-9]+}}(%esp), %ecx
+; X86-BMI2-NEXT:    shlxl %eax, %ecx, %eax
+; X86-BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: mask_pair:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movl %esi, %ecx
+; X64-NOBMI-NEXT:    movl %edi, %eax
+; X64-NOBMI-NEXT:    shrl %cl, %eax
+; X64-NOBMI-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NOBMI-NEXT:    shll %cl, %eax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1-LABEL: mask_pair:
+; X64-BMI1:       # %bb.0:
+; X64-BMI1-NEXT:    movl %esi, %ecx
+; X64-BMI1-NEXT:    movl %edi, %eax
+; X64-BMI1-NEXT:    shrl %cl, %eax
+; X64-BMI1-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-BMI1-NEXT:    shll %cl, %eax
+; X64-BMI1-NEXT:    retq
+;
+; X64-BMI2-LABEL: mask_pair:
+; X64-BMI2:       # %bb.0:
+; X64-BMI2-NEXT:    shrxl %esi, %edi, %eax
+; X64-BMI2-NEXT:    shlxl %esi, %eax, %eax
+; X64-BMI2-NEXT:    retq
+  %shl = shl nsw i32 -1, %y
+  %and = and i32 %shl, %x
+  ret i32 %and
+}
+
+define i64 @mask_pair_64(i64 %x, i64 %y) {
+; X86-NOBMI-LABEL: mask_pair_64:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NOBMI-NEXT:    movl $-1, %edx
+; X86-NOBMI-NEXT:    movl $-1, %eax
+; X86-NOBMI-NEXT:    shll %cl, %eax
+; X86-NOBMI-NEXT:    testb $32, %cl
+; X86-NOBMI-NEXT:    je .LBB1_2
+; X86-NOBMI-NEXT:  # %bb.1:
+; X86-NOBMI-NEXT:    movl %eax, %edx
+; X86-NOBMI-NEXT:    xorl %eax, %eax
+; X86-NOBMI-NEXT:  .LBB1_2:
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT:    retl
+;
+; X86-BMI1-LABEL: mask_pair_64:
+; X86-BMI1:       # %bb.0:
+; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-BMI1-NEXT:    movl $-1, %edx
+; X86-BMI1-NEXT:    movl $-1, %eax
+; X86-BMI1-NEXT:    shll %cl, %eax
+; X86-BMI1-NEXT:    testb $32, %cl
+; X86-BMI1-NEXT:    je .LBB1_2
+; X86-BMI1-NEXT:  # %bb.1:
+; X86-BMI1-NEXT:    movl %eax, %edx
+; X86-BMI1-NEXT:    xorl %eax, %eax
+; X86-BMI1-NEXT:  .LBB1_2:
+; X86-BMI1-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-BMI1-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-BMI1-NEXT:    retl
+;
+; X86-BMI2-LABEL: mask_pair_64:
+; X86-BMI2:       # %bb.0:
+; X86-BMI2-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-BMI2-NEXT:    movl $-1, %edx
+; X86-BMI2-NEXT:    shlxl %ecx, %edx, %eax
+; X86-BMI2-NEXT:    testb $32, %cl
+; X86-BMI2-NEXT:    je .LBB1_2
+; X86-BMI2-NEXT:  # %bb.1:
+; X86-BMI2-NEXT:    movl %eax, %edx
+; X86-BMI2-NEXT:    xorl %eax, %eax
+; X86-BMI2-NEXT:  .LBB1_2:
+; X86-BMI2-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-BMI2-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-BMI2-NEXT:    retl
+;
+; X64-NOBMI-LABEL: mask_pair_64:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movq %rsi, %rcx
+; X64-NOBMI-NEXT:    movq %rdi, %rax
+; X64-NOBMI-NEXT:    shrq %cl, %rax
+; X64-NOBMI-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-NOBMI-NEXT:    shlq %cl, %rax
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1-LABEL: mask_pair_64:
+; X64-BMI1:       # %bb.0:
+; X64-BMI1-NEXT:    movq %rsi, %rcx
+; X64-BMI1-NEXT:    movq %rdi, %rax
+; X64-BMI1-NEXT:    shrq %cl, %rax
+; X64-BMI1-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-BMI1-NEXT:    shlq %cl, %rax
+; X64-BMI1-NEXT:    retq
+;
+; X64-BMI2-LABEL: mask_pair_64:
+; X64-BMI2:       # %bb.0:
+; X64-BMI2-NEXT:    shrxq %rsi, %rdi, %rax
+; X64-BMI2-NEXT:    shlxq %rsi, %rax, %rax
+; X64-BMI2-NEXT:    retq
+  %shl = shl nsw i64 -1, %y
+  %and = and i64 %shl, %x
+  ret i64 %and
+}
+
+define i128 @mask_pair_128(i128 %x, i128 %y) {
+; X86-NOBMI-LABEL: mask_pair_128:
+; X86-NOBMI:       # %bb.0:
+; X86-NOBMI-NEXT:    pushl %ebp
+; X86-NOBMI-NEXT:    .cfi_def_cfa_offset 8
+; X86-NOBMI-NEXT:    pushl %ebx
+; X86-NOBMI-NEXT:    .cfi_def_cfa_offset 12
+; X86-NOBMI-NEXT:    pushl %edi
+; X86-NOBMI-NEXT:    .cfi_def_cfa_offset 16
+; X86-NOBMI-NEXT:    pushl %esi
+; X86-NOBMI-NEXT:    .cfi_def_cfa_offset 20
+; X86-NOBMI-NEXT:    subl $76, %esp
+; X86-NOBMI-NEXT:    .cfi_def_cfa_offset 96
+; X86-NOBMI-NEXT:    .cfi_offset %esi, -20
+; X86-NOBMI-NEXT:    .cfi_offset %edi, -16
+; X86-NOBMI-NEXT:    .cfi_offset %ebx, -12
+; X86-NOBMI-NEXT:    .cfi_offset %ebp, -8
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NOBMI-NEXT:    movl %ebx, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT:    movl %edi, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT:    movl %esi, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT:    movl %edx, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT:    movl %ecx, %edx
+; X86-NOBMI-NEXT:    shrb $3, %dl
+; X86-NOBMI-NEXT:    andb $12, %dl
+; X86-NOBMI-NEXT:    movzbl %dl, %esi
+; X86-NOBMI-NEXT:    movl 44(%esp,%esi), %edi
+; X86-NOBMI-NEXT:    movl %edi, %ebx
+; X86-NOBMI-NEXT:    shrl %cl, %ebx
+; X86-NOBMI-NEXT:    movl %ebx, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT:    movl 40(%esp,%esi), %ebx
+; X86-NOBMI-NEXT:    movl %ebx, %ebp
+; X86-NOBMI-NEXT:    shrdl %cl, %edi, %ebp
+; X86-NOBMI-NEXT:    movl %ebp, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT:    movl 32(%esp,%esi), %edi
+; X86-NOBMI-NEXT:    movl 36(%esp,%esi), %esi
+; X86-NOBMI-NEXT:    movl %esi, %ebp
+; X86-NOBMI-NEXT:    shrdl %cl, %ebx, %ebp
+; X86-NOBMI-NEXT:    movl %ebp, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT:    shrdl %cl, %esi, %edi
+; X86-NOBMI-NEXT:    movl %edi, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT:    movl $0, (%esp)
+; X86-NOBMI-NEXT:    negb %dl
+; X86-NOBMI-NEXT:    movsbl %dl, %edi
+; X86-NOBMI-NEXT:    movl 16(%esp,%edi), %edx
+; X86-NOBMI-NEXT:    movl 20(%esp,%edi), %esi
+; X86-NOBMI-NEXT:    movl 24(%esp,%edi), %ebx
+; X86-NOBMI-NEXT:    movl %ebx, %ebp
+; X86-NOBMI-NEXT:    shldl %cl, %esi, %ebp
+; X86-NOBMI-NEXT:    movl 28(%esp,%edi), %edi
+; X86-NOBMI-NEXT:    shldl %cl, %ebx, %edi
+; X86-NOBMI-NEXT:    movl %edi, 12(%eax)
+; X86-NOBMI-NEXT:    movl %ebp, 8(%eax)
+; X86-NOBMI-NEXT:    movl %edx, %edi
+; X86-NOBMI-NEXT:    shll %cl, %edi
+; X86-NOBMI-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X86-NOBMI-NEXT:    shldl %cl, %edx, %esi
+; X86-NOBMI-NEXT:    movl %esi, 4(%eax)
+; X86-NOBMI-NEXT:    movl %edi, (%eax)
+; X86-NOBMI-NEXT:    addl $76, %esp
+; X86-NOBMI-NEXT:    .cfi_def_cfa_offset 20
+; X86-NOBMI-NEXT:    popl %esi
+; X86-NOBMI-NEXT:    .cfi_def_cfa_offset 16
+; X86-NOBMI-NEXT:    popl %edi
+; X86-NOBMI-NEXT:    .cfi_def_cfa_offset 12
+; X86-NOBMI-NEXT:    popl %ebx
+; X86-NOBMI-NEXT:    .cfi_def_cfa_offset 8
+; X86-NOBMI-NEXT:    popl %ebp
+; X86-NOBMI-NEXT:    .cfi_def_cfa_offset 4
+; X86-NOBMI-NEXT:    retl $4
+;
+; X86-BMI1-LABEL: mask_pair_128:
+; X86-BMI1:       # %bb.0:
+; X86-BMI1-NEXT:    pushl %ebp
+; X86-BMI1-NEXT:    .cfi_def_cfa_offset 8
+; X86-BMI1-NEXT:    pushl %ebx
+; X86-BMI1-NEXT:    .cfi_def_cfa_offset 12
+; X86-BMI1-NEXT:    pushl %edi
+; X86-BMI1-NEXT:    .cfi_def_cfa_offset 16
+; X86-BMI1-NEXT:    pushl %esi
+; X86-BMI1-NEXT:    .cfi_def_cfa_offset 20
+; X86-BMI1-NEXT:    subl $76, %esp
+; X86-BMI1-NEXT:    .cfi_def_cfa_offset 96
+; X86-BMI1-NEXT:    .cfi_offset %esi, -20
+; X86-BMI1-NEXT:    .cfi_offset %edi, -16
+; X86-BMI1-NEXT:    .cfi_offset %ebx, -12
+; X86-BMI1-NEXT:    .cfi_offset %ebp, -8
+; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-BMI1-NEXT:    movl %ebx, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT:    movl %edi, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT:    movl %esi, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT:    movl %edx, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT:    movl %ecx, %edx
+; X86-BMI1-NEXT:    shrb $3, %dl
+; X86-BMI1-NEXT:    andb $12, %dl
+; X86-BMI1-NEXT:    movzbl %dl, %esi
+; X86-BMI1-NEXT:    movl 44(%esp,%esi), %edi
+; X86-BMI1-NEXT:    movl %edi, %ebx
+; X86-BMI1-NEXT:    shrl %cl, %ebx
+; X86-BMI1-NEXT:    movl %ebx, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT:    movl 40(%esp,%esi), %ebx
+; X86-BMI1-NEXT:    movl %ebx, %ebp
+; X86-BMI1-NEXT:    shrdl %cl, %edi, %ebp
+; X86-BMI1-NEXT:    movl %ebp, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT:    movl 32(%esp,%esi), %edi
+; X86-BMI1-NEXT:    movl 36(%esp,%esi), %esi
+; X86-BMI1-NEXT:    movl %esi, %ebp
+; X86-BMI1-NEXT:    shrdl %cl, %ebx, %ebp
+; X86-BMI1-NEXT:    movl %ebp, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT:    shrdl %cl, %esi, %edi
+; X86-BMI1-NEXT:    movl %edi, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT:    movl $0, (%esp)
+; X86-BMI1-NEXT:    negb %dl
+; X86-BMI1-NEXT:    movsbl %dl, %edi
+; X86-BMI1-NEXT:    movl 16(%esp,%edi), %edx
+; X86-BMI1-NEXT:    movl 20(%esp,%edi), %esi
+; X86-BMI1-NEXT:    movl 24(%esp,%edi), %ebx
+; X86-BMI1-NEXT:    movl %ebx, %ebp
+; X86-BMI1-NEXT:    shldl %cl, %esi, %ebp
+; X86-BMI1-NEXT:    movl 28(%esp,%edi), %edi
+; X86-BMI1-NEXT:    shldl %cl, %ebx, %edi
+; X86-BMI1-NEXT:    movl %edi, 12(%eax)
+; X86-BMI1-NEXT:    movl %ebp, 8(%eax)
+; X86-BMI1-NEXT:    movl %edx, %edi
+; X86-BMI1-NEXT:    shll %cl, %edi
+; X86-BMI1-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X86-BMI1-NEXT:    shldl %cl, %edx, %esi
+; X86-BMI1-NEXT:    movl %esi, 4(%eax)
+; X86-BMI1-NEXT:    movl %edi, (%eax)
+; X86-BMI1-NEXT:    addl $76, %esp
+; X86-BMI1-NEXT:    .cfi_def_cfa_offset 20
+; X86-BMI1-NEXT:    popl %esi
+; X86-BMI1-NEXT:    .cfi_def_cfa_offset 16
+; X86-BMI1-NEXT:    popl %edi
+; X86-BMI1-NEXT:    .cfi_def_cfa_offset 12
+; X86-BMI1-NEXT:    popl %ebx
+; X86-BMI1-NEXT:    .cfi_def_cfa_offset 8
+; X86-BMI1-NEXT:    popl %ebp
+; X86-BMI1-NEXT:    .cfi_def_cfa_offset 4
+; X86-BMI1-NEXT:    retl $4
+;
+; X86-BMI2-LABEL: mask_pair_128:
+; X86-BMI2:       # %bb.0:
+; X86-BMI2-NEXT:    pushl %ebp
+; X86-BMI2-NEXT:    .cfi_def_cfa_offset 8
+; X86-BMI2-NEXT:    pushl %ebx
+; X86-BMI2-NEXT:    .cfi_def_cfa_offset 12
+; X86-BMI2-NEXT:    pushl %edi
+; X86-BMI2-NEXT:    .cfi_def_cfa_offset 16
+; X86-BMI2-NEXT:    pushl %esi
+; X86-BMI2-NEXT:    .cfi_def_cfa_offset 20
+; X86-BMI2-NEXT:    subl $76, %esp
+; X86-BMI2-NEXT:    .cfi_def_cfa_offset 96
+; X86-BMI2-NEXT:    .cfi_offset %esi, -20
+; X86-BMI2-NEXT:    .cfi_offset %edi, -16
+; X86-BMI2-NEXT:    .cfi_offset %ebx, -12
+; X86-BMI2-NEXT:    .cfi_offset %ebp, -8
+; X86-BMI2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-BMI2-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-BMI2-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI2-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-BMI2-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-BMI2-NEXT:    movl %ebx, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT:    movl %edi, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT:    movl %esi, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT:    movl %edx, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT:    movl %ecx, %edx
+; X86-BMI2-NEXT:    shrb $3, %dl
+; X86-BMI2-NEXT:    andb $12, %dl
+; X86-BMI2-NEXT:    movzbl %dl, %esi
+; X86-BMI2-NEXT:    movl 44(%esp,%esi), %edi
+; X86-BMI2-NEXT:    shrxl %ecx, %edi, %ebx
+; X86-BMI2-NEXT:    movl %ebx, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT:    movl 40(%esp,%esi), %ebx
+; X86-BMI2-NEXT:    movl %ebx, %ebp
+; X86-BMI2-NEXT:    shrdl %cl, %edi, %ebp
+; X86-BMI2-NEXT:    movl %ebp, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT:    movl 32(%esp,%esi), %edi
+; X86-BMI2-NEXT:    movl 36(%esp,%esi), %esi
+; X86-BMI2-NEXT:    movl %esi, %ebp
+; X86-BMI2-NEXT:    shrdl %cl, %ebx, %ebp
+; X86-BMI2-NEXT:    movl %ebp, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT:    shrdl %cl, %esi, %edi
+; X86-BMI2-NEXT:    movl %edi, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT:    movl $0, (%esp)
+; X86-BMI2-NEXT:    negb %dl
+; X86-BMI2-NEXT:    movsbl %dl, %edi
+; X86-BMI2-NEXT:    movl 16(%esp,%edi), %edx
+; X86-BMI2-NEXT:    movl 20(%esp,%edi), %esi
+; X86-BMI2-NEXT:    movl 24(%esp,%edi), %ebx
+; X86-BMI2-NEXT:    movl %ebx, %ebp
+; X86-BMI2-NEXT:    shldl %cl, %esi, %ebp
+; X86-BMI2-NEXT:    movl 28(%esp,%edi), %edi
+; X86-BMI2-NEXT:    shldl %cl, %ebx, %edi
+; X86-BMI2-NEXT:    movl %edi, 12(%eax)
+; X86-BMI2-NEXT:    movl %ebp, 8(%eax)
+; X86-BMI2-NEXT:    shlxl %ecx, %edx, %edi
+; X86-BMI2-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X86-BMI2-NEXT:    shldl %cl, %edx, %esi
+; X86-BMI2-NEXT:    movl %esi, 4(%eax)
+; X86-BMI2-NEXT:    movl %edi, (%eax)
+; X86-BMI2-NEXT:    addl $76, %esp
+; X86-BMI2-NEXT:    .cfi_def_cfa_offset 20
+; X86-BMI2-NEXT:    popl %esi
+; X86-BMI2-NEXT:    .cfi_def_cfa_offset 16
+; X86-BMI2-NEXT:    popl %edi
+; X86-BMI2-NEXT:    .cfi_def_cfa_offset 12
+; X86-BMI2-NEXT:    popl %ebx
+; X86-BMI2-NEXT:    .cfi_def_cfa_offset 8
+; X86-BMI2-NEXT:    popl %ebp
+; X86-BMI2-NEXT:    .cfi_def_cfa_offset 4
+; X86-BMI2-NEXT:    retl $4
+;
+; X64-NOBMI-LABEL: mask_pair_128:
+; X64-NOBMI:       # %bb.0:
+; X64-NOBMI-NEXT:    movq %rdx, %rcx
+; X64-NOBMI-NEXT:    shrdq %cl, %rsi, %rdi
+; X64-NOBMI-NEXT:    shrq %cl, %rsi
+; X64-NOBMI-NEXT:    xorl %eax, %eax
+; X64-NOBMI-NEXT:    testb $64, %cl
+; X64-NOBMI-NEXT:    cmovneq %rsi, %rdi
+; X64-NOBMI-NEXT:    cmovneq %rax, %rsi
+; X64-NOBMI-NEXT:    movq %rdi, %rdx
+; X64-NOBMI-NEXT:    shlq %cl, %rdx
+; X64-NOBMI-NEXT:    testb $64, %cl
+; X64-NOBMI-NEXT:    cmoveq %rdx, %rax
+; X64-NOBMI-NEXT:    shldq %cl, %rdi, %rsi
+; X64-NOBMI-NEXT:    testb $64, %cl
+; X64-NOBMI-NEXT:    cmoveq %rsi, %rdx
+; X64-NOBMI-NEXT:    retq
+;
+; X64-BMI1-LABEL: mask_pair_128:
+; X64-BMI1:       # %bb.0:
+; X64-BMI1-NEXT:    movq %rdx, %rcx
+; X64-BMI1-NEXT:    shrdq %cl, %rsi, %rdi
+; X64-BMI1-NEXT:    shrq %cl, %rsi
+; X64-BMI1-NEXT:    xorl %eax, %eax
+; X64-BMI1-NEXT:    testb $64, %cl
+; X64-BMI1-NEXT:    cmovneq %rsi, %rdi
+; X64-BMI1-NEXT:    cmovneq %rax, %rsi
+; X64-BMI1-NEXT:    movq %rdi, %rdx
+; X64-BMI1-NEXT:    shlq %cl, %rdx
+; X64-BMI1-NEXT:    testb $64, %cl
+; X64-BMI1-NEXT:    cmoveq %rdx, %rax
+; X64-BMI1-NEXT:    shldq %cl, %rdi, %rsi
+; X64-BMI1-NEXT:    testb $64, %cl
+; X64-BMI1-NEXT:    cmoveq %rsi, %rdx
+; X64-BMI1-NEXT:    retq
+;
+; X64-BMI2-LABEL: mask_pair_128:
+; X64-BMI2:       # %bb.0:
+; X64-BMI2-NEXT:    movq %rdx, %rcx
+; X64-BMI2-NEXT:    shrdq %cl, %rsi, %rdi
+; X64-BMI2-NEXT:    shrxq %rdx, %rsi, %rdx
+; X64-BMI2-NEXT:    xorl %esi, %esi
+; X64-BMI2-NEXT:    testb $64, %cl
+; X64-BMI2-NEXT:    cmovneq %rdx, %rdi
+; X64-BMI2-NEXT:    shlxq %rcx, %rdi, %r8
+; X64-BMI2-NEXT:    movq %r8, %rax
+; X64-BMI2-NEXT:    cmovneq %rsi, %rax
+; X64-BMI2-NEXT:    cmovneq %rsi, %rdx
+; X64-BMI2-NEXT:    shldq %cl, %rdi, %rdx
+; X64-BMI2-NEXT:    testb $64, %cl
+; X64-BMI2-NEXT:    cmovneq %r8, %rdx
+; X64-BMI2-NEXT:    retq
+  %shl = shl nsw i128 -1, %y
+  %and = and i128 %shl, %x
+  ret i128 %and
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK-GI: {{.*}}
+; CHECK-SD: {{.*}}
+; X64: {{.*}}
+; X64-BMINOTBM: {{.*}}
+; X64-BMITBM: {{.*}}
+; X86: {{.*}}
+; X86-BMINOTBM: {{.*}}
+; X86-BMITBM: {{.*}}

From 338d8b59a9a28e17c5c0d8928a6a1f3ab83cb02f Mon Sep 17 00:00:00 2001
From: AZero13 <gfunni234 at gmail.com>
Date: Thu, 4 Sep 2025 09:54:59 -0400
Subject: [PATCH 2/2] [X86][ARM][AArch64] shouldFoldMaskToVariableShiftPair
 should be true for scalars up to the biggest legal type

For ARM, AArch64, and X86, we want to do this for any scalar type up to the
largest legal integer width: 32 bits on ARM and 32-bit x86, and 64 bits on
AArch64 and x86-64. Wider types are legalized into expanded shift sequences,
so the fold is not profitable there.
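
The combine rewrites the variable mask into a pair of shifts, so no mask
constant has to be materialized. A minimal C++ illustration of the underlying
identity (logical shifts, valid for 0 <= Y < 64; illustrative only, not part
of this patch):

    #include <cstdint>

    // Clearing the low Y bits of X: mask form vs. shift-pair form.
    //   X & (~0ull << Y)  ==  (X >> Y) << Y
    uint64_t mask_pair(uint64_t X, uint64_t Y) {
      return (X >> Y) << Y; // no mask constant needed
    }

On AArch64, for example, this turns the three-instruction mov #-1 / lsl / and
sequence into lsr+lsl, as the test updates show.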
---
 llvm/lib/Target/AArch64/AArch64ISelLowering.h |  10 +
 llvm/lib/Target/ARM/ARMISelLowering.h         |  10 +
 llvm/lib/Target/X86/X86ISelLowering.cpp       |   7 +-
 .../test/CodeGen/AArch64/and-mask-variable.ll |  78 +++-
 llvm/test/CodeGen/AArch64/extract-bits.ll     |  98 +++--
 llvm/test/CodeGen/AArch64/extract-lowbits.ll  |  66 ++--
 llvm/test/CodeGen/ARM/and-mask-variable.ll    |  20 +-
 llvm/test/CodeGen/X86/and-mask-variable.ll    | 336 +++++++-----------
 8 files changed, 285 insertions(+), 340 deletions(-)
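
Side note on the X86 hook: the rewrite is not a pure refactor. The old code
rejected only i64 on 32-bit targets, so i128 was still folded; the new width
check also rejects i128, which is why mask_pair_128 below goes back to
materializing the mask. A hypothetical standalone model of the two predicates
(names are illustrative, not from the tree):

    // Old behaviour: reject only i64 on 32-bit targets; i128 still folds.
    bool oldShouldFold(unsigned Bits, bool Is64Bit) {
      if (Bits == 64 && !Is64Bit)
        return false; // 64-bit shifts on 32-bit targets are bloated
      return true;
    }

    // New behaviour: cap at the largest legal GPR width, so i128 is now
    // rejected on both 32- and 64-bit targets.
    bool newShouldFold(unsigned Bits, bool Is64Bit) {
      return Bits <= (Is64Bit ? 64u : 32u);
    }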

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 46738365080f9..8f3f1ffdec369 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -300,6 +300,16 @@ class AArch64TargetLowering : public TargetLowering {
   bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                          CombineLevel Level) const override;
 
+  /// Return true if it is profitable to fold a mask to a variable shift pair.
+  bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override {
+    EVT VT = Y.getValueType();
+
+    if (VT.isVector())
+      return false;
+
+    return VT.getScalarSizeInBits() <= 64;
+  }
+
   bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT,
                                             unsigned SelectOpcode, SDValue X,
                                             SDValue Y) const override;
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index 196ecb1b9f678..2b2b4883a9778 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -773,6 +773,16 @@ class VectorType;
     bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                            CombineLevel Level) const override;
 
+    /// Return true if it is profitable to fold a mask to a variable shift
+    /// pair.
+    bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override {
+      EVT VT = Y.getValueType();
+
+      if (VT.isVector())
+        return false;
+
+      return VT.getScalarSizeInBits() <= 32;
+    }
+
     bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT,
                                               unsigned SelectOpcode, SDValue X,
                                               SDValue Y) const override;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 08ae0d52d795e..a5dc7fae4a12a 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -3663,11 +3663,10 @@ bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const {
   if (VT.isVector())
     return false;
 
-  // 64-bit shifts on 32-bit targets produce really bad bloated code.
-  if (VT == MVT::i64 && !Subtarget.is64Bit())
-    return false;
-
-  return true;
+  // Shifts wider than the largest legal GPR get legalized into bloated
+  // sequences, so only fold types that fit in a register.
+  unsigned MaxWidth = Subtarget.is64Bit() ? 64 : 32;
+  return VT.getScalarSizeInBits() <= MaxWidth;
 }
 
 TargetLowering::ShiftLegalizationStrategy
diff --git a/llvm/test/CodeGen/AArch64/and-mask-variable.ll b/llvm/test/CodeGen/AArch64/and-mask-variable.ll
index a92f3cf5ec092..891d70772845b 100644
--- a/llvm/test/CodeGen/AArch64/and-mask-variable.ll
+++ b/llvm/test/CodeGen/AArch64/and-mask-variable.ll
@@ -3,28 +3,76 @@
 ; RUN: llc -mtriple=aarch64-none-elf -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 define i32 @mask_pair(i32 %x, i32 %y) {
-; CHECK-LABEL: mask_pair:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #-1 // =0xffffffff
-; CHECK-NEXT:    lsl w8, w8, w1
-; CHECK-NEXT:    and w0, w8, w0
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: mask_pair:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    lsr w8, w0, w1
+; CHECK-SD-NEXT:    lsl w0, w8, w1
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: mask_pair:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, #-1 // =0xffffffff
+; CHECK-GI-NEXT:    lsl w8, w8, w1
+; CHECK-GI-NEXT:    and w0, w8, w0
+; CHECK-GI-NEXT:    ret
   %shl = shl nsw i32 -1, %y
   %and = and i32 %shl, %x
   ret i32 %and
 }
 
 define i64 @mask_pair_64(i64 %x, i64 %y) {
-; CHECK-LABEL: mask_pair_64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    lsl x8, x8, x1
-; CHECK-NEXT:    and x0, x8, x0
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: mask_pair_64:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    lsr x8, x0, x1
+; CHECK-SD-NEXT:    lsl x0, x8, x1
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: mask_pair_64:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov x8, #-1 // =0xffffffffffffffff
+; CHECK-GI-NEXT:    lsl x8, x8, x1
+; CHECK-GI-NEXT:    and x0, x8, x0
+; CHECK-GI-NEXT:    ret
   %shl = shl nsw i64 -1, %y
   %and = and i64 %shl, %x
   ret i64 %and
 }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; CHECK-GI: {{.*}}
-; CHECK-SD: {{.*}}
+
+define i128 @mask_pair_128(i128 %x, i128 %y) {
+; CHECK-SD-LABEL: mask_pair_128:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov x8, #-1 // =0xffffffffffffffff
+; CHECK-SD-NEXT:    mvn w9, w2
+; CHECK-SD-NEXT:    mov x10, #9223372036854775807 // =0x7fffffffffffffff
+; CHECK-SD-NEXT:    lsl x8, x8, x2
+; CHECK-SD-NEXT:    lsr x9, x10, x9
+; CHECK-SD-NEXT:    tst x2, #0x40
+; CHECK-SD-NEXT:    orr x9, x8, x9
+; CHECK-SD-NEXT:    csel x9, x8, x9, ne
+; CHECK-SD-NEXT:    csel x8, xzr, x8, ne
+; CHECK-SD-NEXT:    and x0, x8, x0
+; CHECK-SD-NEXT:    and x1, x9, x1
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: mask_pair_128:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, #64 // =0x40
+; CHECK-GI-NEXT:    mov x9, #-1 // =0xffffffffffffffff
+; CHECK-GI-NEXT:    sub x10, x2, #64
+; CHECK-GI-NEXT:    sub x8, x8, x2
+; CHECK-GI-NEXT:    lsl x11, x9, x2
+; CHECK-GI-NEXT:    cmp x2, #64
+; CHECK-GI-NEXT:    lsr x8, x9, x8
+; CHECK-GI-NEXT:    lsl x9, x9, x10
+; CHECK-GI-NEXT:    csel x10, x11, xzr, lo
+; CHECK-GI-NEXT:    orr x8, x8, x11
+; CHECK-GI-NEXT:    and x0, x10, x0
+; CHECK-GI-NEXT:    csel x8, x8, x9, lo
+; CHECK-GI-NEXT:    cmp x2, #0
+; CHECK-GI-NEXT:    csinv x8, x8, xzr, ne
+; CHECK-GI-NEXT:    and x1, x8, x1
+; CHECK-GI-NEXT:    ret
+  %shl = shl nsw i128 -1, %y
+  %and = and i128 %shl, %x
+  ret i128 %and
+}
diff --git a/llvm/test/CodeGen/AArch64/extract-bits.ll b/llvm/test/CodeGen/AArch64/extract-bits.ll
index 8e822d19a19b9..5a96116142b51 100644
--- a/llvm/test/CodeGen/AArch64/extract-bits.ll
+++ b/llvm/test/CodeGen/AArch64/extract-bits.ll
@@ -532,11 +532,10 @@ define i32 @bextr64_32_b2(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
 define i32 @bextr32_c0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
 ; CHECK-LABEL: bextr32_c0:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    neg w8, w2
-; CHECK-NEXT:    mov w9, #-1 // =0xffffffff
-; CHECK-NEXT:    lsr w10, w0, w1
-; CHECK-NEXT:    lsr w8, w9, w8
-; CHECK-NEXT:    and w0, w8, w10
+; CHECK-NEXT:    lsr w8, w0, w1
+; CHECK-NEXT:    neg w9, w2
+; CHECK-NEXT:    lsl w8, w8, w9
+; CHECK-NEXT:    lsr w0, w8, w9
 ; CHECK-NEXT:    ret
   %shifted = lshr i32 %val, %numskipbits
   %numhighbits = sub i32 32, %numlowbits
@@ -548,12 +547,11 @@ define i32 @bextr32_c0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
 define i32 @bextr32_c1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) nounwind {
 ; CHECK-LABEL: bextr32_c1_indexzext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #32 // =0x20
-; CHECK-NEXT:    mov w9, #-1 // =0xffffffff
-; CHECK-NEXT:    lsr w10, w0, w1
-; CHECK-NEXT:    sub w8, w8, w2
-; CHECK-NEXT:    lsr w8, w9, w8
-; CHECK-NEXT:    and w0, w8, w10
+; CHECK-NEXT:    lsr w8, w0, w1
+; CHECK-NEXT:    mov w9, #32 // =0x20
+; CHECK-NEXT:    sub w9, w9, w2
+; CHECK-NEXT:    lsl w8, w8, w9
+; CHECK-NEXT:    lsr w0, w8, w9
 ; CHECK-NEXT:    ret
   %skip = zext i8 %numskipbits to i32
   %shifted = lshr i32 %val, %skip
@@ -569,10 +567,9 @@ define i32 @bextr32_c2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
 ; CHECK-NEXT:    neg w9, w2
-; CHECK-NEXT:    mov w10, #-1 // =0xffffffff
-; CHECK-NEXT:    lsr w9, w10, w9
 ; CHECK-NEXT:    lsr w8, w8, w1
-; CHECK-NEXT:    and w0, w9, w8
+; CHECK-NEXT:    lsl w8, w8, w9
+; CHECK-NEXT:    lsr w0, w8, w9
 ; CHECK-NEXT:    ret
   %val = load i32, ptr %w
   %shifted = lshr i32 %val, %numskipbits
@@ -587,11 +584,10 @@ define i32 @bextr32_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) n
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
 ; CHECK-NEXT:    mov w9, #32 // =0x20
-; CHECK-NEXT:    mov w10, #-1 // =0xffffffff
 ; CHECK-NEXT:    sub w9, w9, w2
 ; CHECK-NEXT:    lsr w8, w8, w1
-; CHECK-NEXT:    lsr w9, w10, w9
-; CHECK-NEXT:    and w0, w9, w8
+; CHECK-NEXT:    lsl w8, w8, w9
+; CHECK-NEXT:    lsr w0, w8, w9
 ; CHECK-NEXT:    ret
   %val = load i32, ptr %w
   %skip = zext i8 %numskipbits to i32
@@ -606,11 +602,10 @@ define i32 @bextr32_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) n
 define i32 @bextr32_c4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
 ; CHECK-LABEL: bextr32_c4_commutative:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    neg w8, w2
-; CHECK-NEXT:    mov w9, #-1 // =0xffffffff
-; CHECK-NEXT:    lsr w10, w0, w1
-; CHECK-NEXT:    lsr w8, w9, w8
-; CHECK-NEXT:    and w0, w10, w8
+; CHECK-NEXT:    lsr w8, w0, w1
+; CHECK-NEXT:    neg w9, w2
+; CHECK-NEXT:    lsl w8, w8, w9
+; CHECK-NEXT:    lsr w0, w8, w9
 ; CHECK-NEXT:    ret
   %shifted = lshr i32 %val, %numskipbits
   %numhighbits = sub i32 32, %numlowbits
@@ -624,11 +619,10 @@ define i32 @bextr32_c4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits)
 define i64 @bextr64_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
 ; CHECK-LABEL: bextr64_c0:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    neg x8, x2
-; CHECK-NEXT:    mov x9, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    lsr x10, x0, x1
-; CHECK-NEXT:    lsr x8, x9, x8
-; CHECK-NEXT:    and x0, x8, x10
+; CHECK-NEXT:    lsr x8, x0, x1
+; CHECK-NEXT:    neg x9, x2
+; CHECK-NEXT:    lsl x8, x8, x9
+; CHECK-NEXT:    lsr x0, x8, x9
 ; CHECK-NEXT:    ret
   %shifted = lshr i64 %val, %numskipbits
   %numhighbits = sub i64 64, %numlowbits
@@ -640,13 +634,12 @@ define i64 @bextr64_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
 define i64 @bextr64_c1_indexzext(i64 %val, i8 %numskipbits, i8 %numlowbits) nounwind {
 ; CHECK-LABEL: bextr64_c1_indexzext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #64 // =0x40
-; CHECK-NEXT:    mov x9, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
-; CHECK-NEXT:    lsr x10, x0, x1
-; CHECK-NEXT:    sub w8, w8, w2
-; CHECK-NEXT:    lsr x8, x9, x8
-; CHECK-NEXT:    and x0, x8, x10
+; CHECK-NEXT:    lsr x8, x0, x1
+; CHECK-NEXT:    mov w9, #64 // =0x40
+; CHECK-NEXT:    sub w9, w9, w2
+; CHECK-NEXT:    lsl x8, x8, x9
+; CHECK-NEXT:    lsr x0, x8, x9
 ; CHECK-NEXT:    ret
   %skip = zext i8 %numskipbits to i64
   %shifted = lshr i64 %val, %skip
@@ -662,10 +655,9 @@ define i64 @bextr64_c2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x8, [x0]
 ; CHECK-NEXT:    neg x9, x2
-; CHECK-NEXT:    mov x10, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    lsr x9, x10, x9
 ; CHECK-NEXT:    lsr x8, x8, x1
-; CHECK-NEXT:    and x0, x9, x8
+; CHECK-NEXT:    lsl x8, x8, x9
+; CHECK-NEXT:    lsr x0, x8, x9
 ; CHECK-NEXT:    ret
   %val = load i64, ptr %w
   %shifted = lshr i64 %val, %numskipbits
@@ -679,13 +671,12 @@ define i64 @bextr64_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) n
 ; CHECK-LABEL: bextr64_c3_load_indexzext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
 ; CHECK-NEXT:    mov w9, #64 // =0x40
-; CHECK-NEXT:    mov x10, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    sub w9, w9, w2
-; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
 ; CHECK-NEXT:    lsr x8, x8, x1
-; CHECK-NEXT:    lsr x9, x10, x9
-; CHECK-NEXT:    and x0, x9, x8
+; CHECK-NEXT:    lsl x8, x8, x9
+; CHECK-NEXT:    lsr x0, x8, x9
 ; CHECK-NEXT:    ret
   %val = load i64, ptr %w
   %skip = zext i8 %numskipbits to i64
@@ -700,11 +691,10 @@ define i64 @bextr64_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) n
 define i64 @bextr64_c4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
 ; CHECK-LABEL: bextr64_c4_commutative:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    neg x8, x2
-; CHECK-NEXT:    mov x9, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    lsr x10, x0, x1
-; CHECK-NEXT:    lsr x8, x9, x8
-; CHECK-NEXT:    and x0, x10, x8
+; CHECK-NEXT:    lsr x8, x0, x1
+; CHECK-NEXT:    neg x9, x2
+; CHECK-NEXT:    lsl x8, x8, x9
+; CHECK-NEXT:    lsr x0, x8, x9
 ; CHECK-NEXT:    ret
   %shifted = lshr i64 %val, %numskipbits
   %numhighbits = sub i64 64, %numlowbits
@@ -737,11 +727,10 @@ define i32 @bextr64_32_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind
 define i32 @bextr64_32_c1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
 ; CHECK-LABEL: bextr64_32_c1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    neg w8, w2
-; CHECK-NEXT:    mov w9, #-1 // =0xffffffff
-; CHECK-NEXT:    lsr x10, x0, x1
-; CHECK-NEXT:    lsr w8, w9, w8
-; CHECK-NEXT:    and w0, w8, w10
+; CHECK-NEXT:    lsr x8, x0, x1
+; CHECK-NEXT:    neg w9, w2
+; CHECK-NEXT:    lsl w8, w8, w9
+; CHECK-NEXT:    lsr w0, w8, w9
 ; CHECK-NEXT:    ret
   %shifted = lshr i64 %val, %numskipbits
   %truncshifted = trunc i64 %shifted to i32
@@ -756,11 +745,10 @@ define i32 @bextr64_32_c1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind
 define i32 @bextr64_32_c2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
 ; CHECK-LABEL: bextr64_32_c2:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    neg w8, w2
-; CHECK-NEXT:    mov w9, #-1 // =0xffffffff
-; CHECK-NEXT:    lsr x10, x0, x1
-; CHECK-NEXT:    lsr w8, w9, w8
-; CHECK-NEXT:    and w0, w8, w10
+; CHECK-NEXT:    lsr x8, x0, x1
+; CHECK-NEXT:    neg w9, w2
+; CHECK-NEXT:    lsl w8, w8, w9
+; CHECK-NEXT:    lsr w0, w8, w9
 ; CHECK-NEXT:    ret
   %shifted = lshr i64 %val, %numskipbits
   %numhighbits = sub i32 32, %numlowbits
diff --git a/llvm/test/CodeGen/AArch64/extract-lowbits.ll b/llvm/test/CodeGen/AArch64/extract-lowbits.ll
index 4b8f3e86b5fef..368440c65df84 100644
--- a/llvm/test/CodeGen/AArch64/extract-lowbits.ll
+++ b/llvm/test/CodeGen/AArch64/extract-lowbits.ll
@@ -347,10 +347,9 @@ define i64 @bzhi64_b4_commutative(i64 %val, i64 %numlowbits) nounwind {
 define i32 @bzhi32_c0(i32 %val, i32 %numlowbits) nounwind {
 ; CHECK-LABEL: bzhi32_c0:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #-1 // =0xffffffff
-; CHECK-NEXT:    neg w9, w1
-; CHECK-NEXT:    lsr w8, w8, w9
-; CHECK-NEXT:    and w0, w8, w0
+; CHECK-NEXT:    neg w8, w1
+; CHECK-NEXT:    lsl w9, w0, w8
+; CHECK-NEXT:    lsr w0, w9, w8
 ; CHECK-NEXT:    ret
   %numhighbits = sub i32 32, %numlowbits
   %mask = lshr i32 -1, %numhighbits
@@ -362,10 +361,9 @@ define i32 @bzhi32_c1_indexzext(i32 %val, i8 %numlowbits) nounwind {
 ; CHECK-LABEL: bzhi32_c1_indexzext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #32 // =0x20
-; CHECK-NEXT:    mov w9, #-1 // =0xffffffff
 ; CHECK-NEXT:    sub w8, w8, w1
-; CHECK-NEXT:    lsr w8, w9, w8
-; CHECK-NEXT:    and w0, w8, w0
+; CHECK-NEXT:    lsl w9, w0, w8
+; CHECK-NEXT:    lsr w0, w9, w8
 ; CHECK-NEXT:    ret
   %numhighbits = sub i8 32, %numlowbits
   %sh_prom = zext i8 %numhighbits to i32
@@ -377,11 +375,10 @@ define i32 @bzhi32_c1_indexzext(i32 %val, i8 %numlowbits) nounwind {
 define i32 @bzhi32_c2_load(ptr %w, i32 %numlowbits) nounwind {
 ; CHECK-LABEL: bzhi32_c2_load:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #-1 // =0xffffffff
+; CHECK-NEXT:    ldr w8, [x0]
 ; CHECK-NEXT:    neg w9, w1
-; CHECK-NEXT:    ldr w10, [x0]
-; CHECK-NEXT:    lsr w8, w8, w9
-; CHECK-NEXT:    and w0, w8, w10
+; CHECK-NEXT:    lsl w8, w8, w9
+; CHECK-NEXT:    lsr w0, w8, w9
 ; CHECK-NEXT:    ret
   %val = load i32, ptr %w
   %numhighbits = sub i32 32, %numlowbits
@@ -394,11 +391,10 @@ define i32 @bzhi32_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind {
 ; CHECK-LABEL: bzhi32_c3_load_indexzext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #32 // =0x20
-; CHECK-NEXT:    mov w9, #-1 // =0xffffffff
-; CHECK-NEXT:    ldr w10, [x0]
+; CHECK-NEXT:    ldr w9, [x0]
 ; CHECK-NEXT:    sub w8, w8, w1
-; CHECK-NEXT:    lsr w8, w9, w8
-; CHECK-NEXT:    and w0, w8, w10
+; CHECK-NEXT:    lsl w9, w9, w8
+; CHECK-NEXT:    lsr w0, w9, w8
 ; CHECK-NEXT:    ret
   %val = load i32, ptr %w
   %numhighbits = sub i8 32, %numlowbits
@@ -411,10 +407,9 @@ define i32 @bzhi32_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind {
 define i32 @bzhi32_c4_commutative(i32 %val, i32 %numlowbits) nounwind {
 ; CHECK-LABEL: bzhi32_c4_commutative:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #-1 // =0xffffffff
-; CHECK-NEXT:    neg w9, w1
-; CHECK-NEXT:    lsr w8, w8, w9
-; CHECK-NEXT:    and w0, w0, w8
+; CHECK-NEXT:    neg w8, w1
+; CHECK-NEXT:    lsl w9, w0, w8
+; CHECK-NEXT:    lsr w0, w9, w8
 ; CHECK-NEXT:    ret
   %numhighbits = sub i32 32, %numlowbits
   %mask = lshr i32 -1, %numhighbits
@@ -427,10 +422,9 @@ define i32 @bzhi32_c4_commutative(i32 %val, i32 %numlowbits) nounwind {
 define i64 @bzhi64_c0(i64 %val, i64 %numlowbits) nounwind {
 ; CHECK-LABEL: bzhi64_c0:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    neg x9, x1
-; CHECK-NEXT:    lsr x8, x8, x9
-; CHECK-NEXT:    and x0, x8, x0
+; CHECK-NEXT:    neg x8, x1
+; CHECK-NEXT:    lsl x9, x0, x8
+; CHECK-NEXT:    lsr x0, x9, x8
 ; CHECK-NEXT:    ret
   %numhighbits = sub i64 64, %numlowbits
   %mask = lshr i64 -1, %numhighbits
@@ -442,10 +436,9 @@ define i64 @bzhi64_c1_indexzext(i64 %val, i8 %numlowbits) nounwind {
 ; CHECK-LABEL: bzhi64_c1_indexzext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #64 // =0x40
-; CHECK-NEXT:    mov x9, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    sub w8, w8, w1
-; CHECK-NEXT:    lsr x8, x9, x8
-; CHECK-NEXT:    and x0, x8, x0
+; CHECK-NEXT:    lsl x9, x0, x8
+; CHECK-NEXT:    lsr x0, x9, x8
 ; CHECK-NEXT:    ret
   %numhighbits = sub i8 64, %numlowbits
   %sh_prom = zext i8 %numhighbits to i64
@@ -457,11 +450,10 @@ define i64 @bzhi64_c1_indexzext(i64 %val, i8 %numlowbits) nounwind {
 define i64 @bzhi64_c2_load(ptr %w, i64 %numlowbits) nounwind {
 ; CHECK-LABEL: bzhi64_c2_load:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    ldr x8, [x0]
 ; CHECK-NEXT:    neg x9, x1
-; CHECK-NEXT:    ldr x10, [x0]
-; CHECK-NEXT:    lsr x8, x8, x9
-; CHECK-NEXT:    and x0, x8, x10
+; CHECK-NEXT:    lsl x8, x8, x9
+; CHECK-NEXT:    lsr x0, x8, x9
 ; CHECK-NEXT:    ret
   %val = load i64, ptr %w
   %numhighbits = sub i64 64, %numlowbits
@@ -474,11 +466,10 @@ define i64 @bzhi64_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind {
 ; CHECK-LABEL: bzhi64_c3_load_indexzext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #64 // =0x40
-; CHECK-NEXT:    mov x9, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    ldr x10, [x0]
+; CHECK-NEXT:    ldr x9, [x0]
 ; CHECK-NEXT:    sub w8, w8, w1
-; CHECK-NEXT:    lsr x8, x9, x8
-; CHECK-NEXT:    and x0, x8, x10
+; CHECK-NEXT:    lsl x9, x9, x8
+; CHECK-NEXT:    lsr x0, x9, x8
 ; CHECK-NEXT:    ret
   %val = load i64, ptr %w
   %numhighbits = sub i8 64, %numlowbits
@@ -491,10 +482,9 @@ define i64 @bzhi64_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind {
 define i64 @bzhi64_c4_commutative(i64 %val, i64 %numlowbits) nounwind {
 ; CHECK-LABEL: bzhi64_c4_commutative:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    neg x9, x1
-; CHECK-NEXT:    lsr x8, x8, x9
-; CHECK-NEXT:    and x0, x0, x8
+; CHECK-NEXT:    neg x8, x1
+; CHECK-NEXT:    lsl x9, x0, x8
+; CHECK-NEXT:    lsr x0, x9, x8
 ; CHECK-NEXT:    ret
   %numhighbits = sub i64 64, %numlowbits
   %mask = lshr i64 -1, %numhighbits
diff --git a/llvm/test/CodeGen/ARM/and-mask-variable.ll b/llvm/test/CodeGen/ARM/and-mask-variable.ll
index 0b57fe278bf6e..0f84b76f97a6b 100644
--- a/llvm/test/CodeGen/ARM/and-mask-variable.ll
+++ b/llvm/test/CodeGen/ARM/and-mask-variable.ll
@@ -7,30 +7,26 @@
 define i32 @mask_pair(i32 %x, i32 %y) {
 ; V7M-LABEL: mask_pair:
 ; V7M:       @ %bb.0:
-; V7M-NEXT:    mov.w r2, #-1
-; V7M-NEXT:    lsl.w r1, r2, r1
-; V7M-NEXT:    ands r0, r1
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    lsls r0, r1
 ; V7M-NEXT:    bx lr
 ;
 ; V7A-LABEL: mask_pair:
 ; V7A:       @ %bb.0:
-; V7A-NEXT:    mvn r2, #0
-; V7A-NEXT:    and r0, r0, r2, lsl r1
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    lsl r0, r0, r1
 ; V7A-NEXT:    bx lr
 ;
 ; V7A-T-LABEL: mask_pair:
 ; V7A-T:       @ %bb.0:
-; V7A-T-NEXT:    mov.w r2, #-1
-; V7A-T-NEXT:    lsl.w r1, r2, r1
-; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    lsls r0, r1
 ; V7A-T-NEXT:    bx lr
 ;
 ; V6M-LABEL: mask_pair:
 ; V6M:       @ %bb.0:
-; V6M-NEXT:    movs r2, #0
-; V6M-NEXT:    mvns r2, r2
-; V6M-NEXT:    lsls r2, r1
-; V6M-NEXT:    ands r0, r2
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    lsls r0, r1
 ; V6M-NEXT:    bx lr
   %shl = shl nsw i32 -1, %y
   %and = and i32 %shl, %x
diff --git a/llvm/test/CodeGen/X86/and-mask-variable.ll b/llvm/test/CodeGen/X86/and-mask-variable.ll
index 54daa86dc6f36..844a413391d75 100644
--- a/llvm/test/CodeGen/X86/and-mask-variable.ll
+++ b/llvm/test/CodeGen/X86/and-mask-variable.ll
@@ -141,307 +141,213 @@ define i64 @mask_pair_64(i64 %x, i64 %y) {
 define i128 @mask_pair_128(i128 %x, i128 %y) {
 ; X86-NOBMI-LABEL: mask_pair_128:
 ; X86-NOBMI:       # %bb.0:
-; X86-NOBMI-NEXT:    pushl %ebp
-; X86-NOBMI-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NOBMI-NEXT:    pushl %ebx
-; X86-NOBMI-NEXT:    .cfi_def_cfa_offset 12
+; X86-NOBMI-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NOBMI-NEXT:    pushl %edi
-; X86-NOBMI-NEXT:    .cfi_def_cfa_offset 16
+; X86-NOBMI-NEXT:    .cfi_def_cfa_offset 12
 ; X86-NOBMI-NEXT:    pushl %esi
-; X86-NOBMI-NEXT:    .cfi_def_cfa_offset 20
-; X86-NOBMI-NEXT:    subl $76, %esp
-; X86-NOBMI-NEXT:    .cfi_def_cfa_offset 96
-; X86-NOBMI-NEXT:    .cfi_offset %esi, -20
-; X86-NOBMI-NEXT:    .cfi_offset %edi, -16
-; X86-NOBMI-NEXT:    .cfi_offset %ebx, -12
-; X86-NOBMI-NEXT:    .cfi_offset %ebp, -8
+; X86-NOBMI-NEXT:    .cfi_def_cfa_offset 16
+; X86-NOBMI-NEXT:    subl $32, %esp
+; X86-NOBMI-NEXT:    .cfi_def_cfa_offset 48
+; X86-NOBMI-NEXT:    .cfi_offset %esi, -16
+; X86-NOBMI-NEXT:    .cfi_offset %edi, -12
+; X86-NOBMI-NEXT:    .cfi_offset %ebx, -8
 ; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-NOBMI-NEXT:    movl {{[0-9]+}}(%esp), %ebx
-; X86-NOBMI-NEXT:    movl %ebx, {{[0-9]+}}(%esp)
-; X86-NOBMI-NEXT:    movl %edi, {{[0-9]+}}(%esp)
-; X86-NOBMI-NEXT:    movl %esi, {{[0-9]+}}(%esp)
-; X86-NOBMI-NEXT:    movl %edx, {{[0-9]+}}(%esp)
-; X86-NOBMI-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT:    movl $-1, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT:    movl $-1, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT:    movl $-1, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT:    movl $-1, {{[0-9]+}}(%esp)
 ; X86-NOBMI-NEXT:    movl $0, {{[0-9]+}}(%esp)
 ; X86-NOBMI-NEXT:    movl $0, {{[0-9]+}}(%esp)
 ; X86-NOBMI-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT:    movl $0, (%esp)
 ; X86-NOBMI-NEXT:    movl %ecx, %edx
 ; X86-NOBMI-NEXT:    shrb $3, %dl
 ; X86-NOBMI-NEXT:    andb $12, %dl
-; X86-NOBMI-NEXT:    movzbl %dl, %esi
-; X86-NOBMI-NEXT:    movl 44(%esp,%esi), %edi
-; X86-NOBMI-NEXT:    movl %edi, %ebx
-; X86-NOBMI-NEXT:    shrl %cl, %ebx
-; X86-NOBMI-NEXT:    movl %ebx, {{[0-9]+}}(%esp)
-; X86-NOBMI-NEXT:    movl 40(%esp,%esi), %ebx
-; X86-NOBMI-NEXT:    movl %ebx, %ebp
-; X86-NOBMI-NEXT:    shrdl %cl, %edi, %ebp
-; X86-NOBMI-NEXT:    movl %ebp, {{[0-9]+}}(%esp)
-; X86-NOBMI-NEXT:    movl 32(%esp,%esi), %edi
-; X86-NOBMI-NEXT:    movl 36(%esp,%esi), %esi
-; X86-NOBMI-NEXT:    movl %esi, %ebp
-; X86-NOBMI-NEXT:    shrdl %cl, %ebx, %ebp
-; X86-NOBMI-NEXT:    movl %ebp, {{[0-9]+}}(%esp)
-; X86-NOBMI-NEXT:    shrdl %cl, %esi, %edi
-; X86-NOBMI-NEXT:    movl %edi, {{[0-9]+}}(%esp)
-; X86-NOBMI-NEXT:    movl $0, {{[0-9]+}}(%esp)
-; X86-NOBMI-NEXT:    movl $0, {{[0-9]+}}(%esp)
-; X86-NOBMI-NEXT:    movl $0, {{[0-9]+}}(%esp)
-; X86-NOBMI-NEXT:    movl $0, (%esp)
 ; X86-NOBMI-NEXT:    negb %dl
-; X86-NOBMI-NEXT:    movsbl %dl, %edi
-; X86-NOBMI-NEXT:    movl 16(%esp,%edi), %edx
-; X86-NOBMI-NEXT:    movl 20(%esp,%edi), %esi
-; X86-NOBMI-NEXT:    movl 24(%esp,%edi), %ebx
-; X86-NOBMI-NEXT:    movl %ebx, %ebp
-; X86-NOBMI-NEXT:    shldl %cl, %esi, %ebp
-; X86-NOBMI-NEXT:    movl 28(%esp,%edi), %edi
-; X86-NOBMI-NEXT:    shldl %cl, %ebx, %edi
-; X86-NOBMI-NEXT:    movl %edi, 12(%eax)
-; X86-NOBMI-NEXT:    movl %ebp, 8(%eax)
-; X86-NOBMI-NEXT:    movl %edx, %edi
-; X86-NOBMI-NEXT:    shll %cl, %edi
-; X86-NOBMI-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X86-NOBMI-NEXT:    movsbl %dl, %ebx
+; X86-NOBMI-NEXT:    movl 24(%esp,%ebx), %edx
+; X86-NOBMI-NEXT:    movl 28(%esp,%ebx), %esi
 ; X86-NOBMI-NEXT:    shldl %cl, %edx, %esi
-; X86-NOBMI-NEXT:    movl %esi, 4(%eax)
+; X86-NOBMI-NEXT:    movl 16(%esp,%ebx), %edi
+; X86-NOBMI-NEXT:    movl 20(%esp,%ebx), %ebx
+; X86-NOBMI-NEXT:    shldl %cl, %ebx, %edx
+; X86-NOBMI-NEXT:    shldl %cl, %edi, %ebx
+; X86-NOBMI-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X86-NOBMI-NEXT:    shll %cl, %edi
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %esi
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %edi
+; X86-NOBMI-NEXT:    andl {{[0-9]+}}(%esp), %ebx
+; X86-NOBMI-NEXT:    movl %esi, 12(%eax)
+; X86-NOBMI-NEXT:    movl %edx, 8(%eax)
+; X86-NOBMI-NEXT:    movl %ebx, 4(%eax)
 ; X86-NOBMI-NEXT:    movl %edi, (%eax)
-; X86-NOBMI-NEXT:    addl $76, %esp
-; X86-NOBMI-NEXT:    .cfi_def_cfa_offset 20
-; X86-NOBMI-NEXT:    popl %esi
+; X86-NOBMI-NEXT:    addl $32, %esp
 ; X86-NOBMI-NEXT:    .cfi_def_cfa_offset 16
-; X86-NOBMI-NEXT:    popl %edi
+; X86-NOBMI-NEXT:    popl %esi
 ; X86-NOBMI-NEXT:    .cfi_def_cfa_offset 12
-; X86-NOBMI-NEXT:    popl %ebx
+; X86-NOBMI-NEXT:    popl %edi
 ; X86-NOBMI-NEXT:    .cfi_def_cfa_offset 8
-; X86-NOBMI-NEXT:    popl %ebp
+; X86-NOBMI-NEXT:    popl %ebx
 ; X86-NOBMI-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NOBMI-NEXT:    retl $4
 ;
 ; X86-BMI1-LABEL: mask_pair_128:
 ; X86-BMI1:       # %bb.0:
-; X86-BMI1-NEXT:    pushl %ebp
-; X86-BMI1-NEXT:    .cfi_def_cfa_offset 8
 ; X86-BMI1-NEXT:    pushl %ebx
-; X86-BMI1-NEXT:    .cfi_def_cfa_offset 12
+; X86-BMI1-NEXT:    .cfi_def_cfa_offset 8
 ; X86-BMI1-NEXT:    pushl %edi
-; X86-BMI1-NEXT:    .cfi_def_cfa_offset 16
+; X86-BMI1-NEXT:    .cfi_def_cfa_offset 12
 ; X86-BMI1-NEXT:    pushl %esi
-; X86-BMI1-NEXT:    .cfi_def_cfa_offset 20
-; X86-BMI1-NEXT:    subl $76, %esp
-; X86-BMI1-NEXT:    .cfi_def_cfa_offset 96
-; X86-BMI1-NEXT:    .cfi_offset %esi, -20
-; X86-BMI1-NEXT:    .cfi_offset %edi, -16
-; X86-BMI1-NEXT:    .cfi_offset %ebx, -12
-; X86-BMI1-NEXT:    .cfi_offset %ebp, -8
+; X86-BMI1-NEXT:    .cfi_def_cfa_offset 16
+; X86-BMI1-NEXT:    subl $32, %esp
+; X86-BMI1-NEXT:    .cfi_def_cfa_offset 48
+; X86-BMI1-NEXT:    .cfi_offset %esi, -16
+; X86-BMI1-NEXT:    .cfi_offset %edi, -12
+; X86-BMI1-NEXT:    .cfi_offset %ebx, -8
 ; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %ebx
-; X86-BMI1-NEXT:    movl %ebx, {{[0-9]+}}(%esp)
-; X86-BMI1-NEXT:    movl %edi, {{[0-9]+}}(%esp)
-; X86-BMI1-NEXT:    movl %esi, {{[0-9]+}}(%esp)
-; X86-BMI1-NEXT:    movl %edx, {{[0-9]+}}(%esp)
-; X86-BMI1-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT:    movl $-1, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT:    movl $-1, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT:    movl $-1, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT:    movl $-1, {{[0-9]+}}(%esp)
 ; X86-BMI1-NEXT:    movl $0, {{[0-9]+}}(%esp)
 ; X86-BMI1-NEXT:    movl $0, {{[0-9]+}}(%esp)
 ; X86-BMI1-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT:    movl $0, (%esp)
 ; X86-BMI1-NEXT:    movl %ecx, %edx
 ; X86-BMI1-NEXT:    shrb $3, %dl
 ; X86-BMI1-NEXT:    andb $12, %dl
-; X86-BMI1-NEXT:    movzbl %dl, %esi
-; X86-BMI1-NEXT:    movl 44(%esp,%esi), %edi
-; X86-BMI1-NEXT:    movl %edi, %ebx
-; X86-BMI1-NEXT:    shrl %cl, %ebx
-; X86-BMI1-NEXT:    movl %ebx, {{[0-9]+}}(%esp)
-; X86-BMI1-NEXT:    movl 40(%esp,%esi), %ebx
-; X86-BMI1-NEXT:    movl %ebx, %ebp
-; X86-BMI1-NEXT:    shrdl %cl, %edi, %ebp
-; X86-BMI1-NEXT:    movl %ebp, {{[0-9]+}}(%esp)
-; X86-BMI1-NEXT:    movl 32(%esp,%esi), %edi
-; X86-BMI1-NEXT:    movl 36(%esp,%esi), %esi
-; X86-BMI1-NEXT:    movl %esi, %ebp
-; X86-BMI1-NEXT:    shrdl %cl, %ebx, %ebp
-; X86-BMI1-NEXT:    movl %ebp, {{[0-9]+}}(%esp)
-; X86-BMI1-NEXT:    shrdl %cl, %esi, %edi
-; X86-BMI1-NEXT:    movl %edi, {{[0-9]+}}(%esp)
-; X86-BMI1-NEXT:    movl $0, {{[0-9]+}}(%esp)
-; X86-BMI1-NEXT:    movl $0, {{[0-9]+}}(%esp)
-; X86-BMI1-NEXT:    movl $0, {{[0-9]+}}(%esp)
-; X86-BMI1-NEXT:    movl $0, (%esp)
 ; X86-BMI1-NEXT:    negb %dl
-; X86-BMI1-NEXT:    movsbl %dl, %edi
-; X86-BMI1-NEXT:    movl 16(%esp,%edi), %edx
-; X86-BMI1-NEXT:    movl 20(%esp,%edi), %esi
-; X86-BMI1-NEXT:    movl 24(%esp,%edi), %ebx
-; X86-BMI1-NEXT:    movl %ebx, %ebp
-; X86-BMI1-NEXT:    shldl %cl, %esi, %ebp
-; X86-BMI1-NEXT:    movl 28(%esp,%edi), %edi
-; X86-BMI1-NEXT:    shldl %cl, %ebx, %edi
-; X86-BMI1-NEXT:    movl %edi, 12(%eax)
-; X86-BMI1-NEXT:    movl %ebp, 8(%eax)
-; X86-BMI1-NEXT:    movl %edx, %edi
-; X86-BMI1-NEXT:    shll %cl, %edi
-; X86-BMI1-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X86-BMI1-NEXT:    movsbl %dl, %ebx
+; X86-BMI1-NEXT:    movl 24(%esp,%ebx), %edx
+; X86-BMI1-NEXT:    movl 28(%esp,%ebx), %esi
 ; X86-BMI1-NEXT:    shldl %cl, %edx, %esi
-; X86-BMI1-NEXT:    movl %esi, 4(%eax)
+; X86-BMI1-NEXT:    movl 16(%esp,%ebx), %edi
+; X86-BMI1-NEXT:    movl 20(%esp,%ebx), %ebx
+; X86-BMI1-NEXT:    shldl %cl, %ebx, %edx
+; X86-BMI1-NEXT:    shldl %cl, %edi, %ebx
+; X86-BMI1-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X86-BMI1-NEXT:    shll %cl, %edi
+; X86-BMI1-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-BMI1-NEXT:    andl {{[0-9]+}}(%esp), %esi
+; X86-BMI1-NEXT:    andl {{[0-9]+}}(%esp), %edi
+; X86-BMI1-NEXT:    andl {{[0-9]+}}(%esp), %ebx
+; X86-BMI1-NEXT:    movl %esi, 12(%eax)
+; X86-BMI1-NEXT:    movl %edx, 8(%eax)
+; X86-BMI1-NEXT:    movl %ebx, 4(%eax)
 ; X86-BMI1-NEXT:    movl %edi, (%eax)
-; X86-BMI1-NEXT:    addl $76, %esp
-; X86-BMI1-NEXT:    .cfi_def_cfa_offset 20
-; X86-BMI1-NEXT:    popl %esi
+; X86-BMI1-NEXT:    addl $32, %esp
 ; X86-BMI1-NEXT:    .cfi_def_cfa_offset 16
-; X86-BMI1-NEXT:    popl %edi
+; X86-BMI1-NEXT:    popl %esi
 ; X86-BMI1-NEXT:    .cfi_def_cfa_offset 12
-; X86-BMI1-NEXT:    popl %ebx
+; X86-BMI1-NEXT:    popl %edi
 ; X86-BMI1-NEXT:    .cfi_def_cfa_offset 8
-; X86-BMI1-NEXT:    popl %ebp
+; X86-BMI1-NEXT:    popl %ebx
 ; X86-BMI1-NEXT:    .cfi_def_cfa_offset 4
 ; X86-BMI1-NEXT:    retl $4
 ;
 ; X86-BMI2-LABEL: mask_pair_128:
 ; X86-BMI2:       # %bb.0:
-; X86-BMI2-NEXT:    pushl %ebp
-; X86-BMI2-NEXT:    .cfi_def_cfa_offset 8
 ; X86-BMI2-NEXT:    pushl %ebx
-; X86-BMI2-NEXT:    .cfi_def_cfa_offset 12
+; X86-BMI2-NEXT:    .cfi_def_cfa_offset 8
 ; X86-BMI2-NEXT:    pushl %edi
-; X86-BMI2-NEXT:    .cfi_def_cfa_offset 16
+; X86-BMI2-NEXT:    .cfi_def_cfa_offset 12
 ; X86-BMI2-NEXT:    pushl %esi
-; X86-BMI2-NEXT:    .cfi_def_cfa_offset 20
-; X86-BMI2-NEXT:    subl $76, %esp
-; X86-BMI2-NEXT:    .cfi_def_cfa_offset 96
-; X86-BMI2-NEXT:    .cfi_offset %esi, -20
-; X86-BMI2-NEXT:    .cfi_offset %edi, -16
-; X86-BMI2-NEXT:    .cfi_offset %ebx, -12
-; X86-BMI2-NEXT:    .cfi_offset %ebp, -8
+; X86-BMI2-NEXT:    .cfi_def_cfa_offset 16
+; X86-BMI2-NEXT:    subl $32, %esp
+; X86-BMI2-NEXT:    .cfi_def_cfa_offset 48
+; X86-BMI2-NEXT:    .cfi_offset %esi, -16
+; X86-BMI2-NEXT:    .cfi_offset %edi, -12
+; X86-BMI2-NEXT:    .cfi_offset %ebx, -8
 ; X86-BMI2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-BMI2-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-BMI2-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-BMI2-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-BMI2-NEXT:    movl {{[0-9]+}}(%esp), %ebx
-; X86-BMI2-NEXT:    movl %ebx, {{[0-9]+}}(%esp)
-; X86-BMI2-NEXT:    movl %edi, {{[0-9]+}}(%esp)
-; X86-BMI2-NEXT:    movl %esi, {{[0-9]+}}(%esp)
-; X86-BMI2-NEXT:    movl %edx, {{[0-9]+}}(%esp)
-; X86-BMI2-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT:    movl $-1, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT:    movl $-1, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT:    movl $-1, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT:    movl $-1, {{[0-9]+}}(%esp)
 ; X86-BMI2-NEXT:    movl $0, {{[0-9]+}}(%esp)
 ; X86-BMI2-NEXT:    movl $0, {{[0-9]+}}(%esp)
 ; X86-BMI2-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT:    movl $0, (%esp)
 ; X86-BMI2-NEXT:    movl %ecx, %edx
 ; X86-BMI2-NEXT:    shrb $3, %dl
 ; X86-BMI2-NEXT:    andb $12, %dl
-; X86-BMI2-NEXT:    movzbl %dl, %esi
-; X86-BMI2-NEXT:    movl 44(%esp,%esi), %edi
-; X86-BMI2-NEXT:    shrxl %ecx, %edi, %ebx
-; X86-BMI2-NEXT:    movl %ebx, {{[0-9]+}}(%esp)
-; X86-BMI2-NEXT:    movl 40(%esp,%esi), %ebx
-; X86-BMI2-NEXT:    movl %ebx, %ebp
-; X86-BMI2-NEXT:    shrdl %cl, %edi, %ebp
-; X86-BMI2-NEXT:    movl %ebp, {{[0-9]+}}(%esp)
-; X86-BMI2-NEXT:    movl 32(%esp,%esi), %edi
-; X86-BMI2-NEXT:    movl 36(%esp,%esi), %esi
-; X86-BMI2-NEXT:    movl %esi, %ebp
-; X86-BMI2-NEXT:    shrdl %cl, %ebx, %ebp
-; X86-BMI2-NEXT:    movl %ebp, {{[0-9]+}}(%esp)
-; X86-BMI2-NEXT:    shrdl %cl, %esi, %edi
-; X86-BMI2-NEXT:    movl %edi, {{[0-9]+}}(%esp)
-; X86-BMI2-NEXT:    movl $0, {{[0-9]+}}(%esp)
-; X86-BMI2-NEXT:    movl $0, {{[0-9]+}}(%esp)
-; X86-BMI2-NEXT:    movl $0, {{[0-9]+}}(%esp)
-; X86-BMI2-NEXT:    movl $0, (%esp)
 ; X86-BMI2-NEXT:    negb %dl
 ; X86-BMI2-NEXT:    movsbl %dl, %edi
-; X86-BMI2-NEXT:    movl 16(%esp,%edi), %edx
-; X86-BMI2-NEXT:    movl 20(%esp,%edi), %esi
-; X86-BMI2-NEXT:    movl 24(%esp,%edi), %ebx
-; X86-BMI2-NEXT:    movl %ebx, %ebp
-; X86-BMI2-NEXT:    shldl %cl, %esi, %ebp
-; X86-BMI2-NEXT:    movl 28(%esp,%edi), %edi
-; X86-BMI2-NEXT:    shldl %cl, %ebx, %edi
-; X86-BMI2-NEXT:    movl %edi, 12(%eax)
-; X86-BMI2-NEXT:    movl %ebp, 8(%eax)
-; X86-BMI2-NEXT:    shlxl %ecx, %edx, %edi
-; X86-BMI2-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X86-BMI2-NEXT:    movl 24(%esp,%edi), %edx
+; X86-BMI2-NEXT:    movl 28(%esp,%edi), %esi
 ; X86-BMI2-NEXT:    shldl %cl, %edx, %esi
-; X86-BMI2-NEXT:    movl %esi, 4(%eax)
-; X86-BMI2-NEXT:    movl %edi, (%eax)
-; X86-BMI2-NEXT:    addl $76, %esp
-; X86-BMI2-NEXT:    .cfi_def_cfa_offset 20
-; X86-BMI2-NEXT:    popl %esi
+; X86-BMI2-NEXT:    movl 16(%esp,%edi), %ebx
+; X86-BMI2-NEXT:    movl 20(%esp,%edi), %edi
+; X86-BMI2-NEXT:    shldl %cl, %edi, %edx
+; X86-BMI2-NEXT:    shldl %cl, %ebx, %edi
+; X86-BMI2-NEXT:    shlxl %ecx, %ebx, %ecx
+; X86-BMI2-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-BMI2-NEXT:    andl {{[0-9]+}}(%esp), %esi
+; X86-BMI2-NEXT:    andl {{[0-9]+}}(%esp), %ecx
+; X86-BMI2-NEXT:    andl {{[0-9]+}}(%esp), %edi
+; X86-BMI2-NEXT:    movl %esi, 12(%eax)
+; X86-BMI2-NEXT:    movl %edx, 8(%eax)
+; X86-BMI2-NEXT:    movl %edi, 4(%eax)
+; X86-BMI2-NEXT:    movl %ecx, (%eax)
+; X86-BMI2-NEXT:    addl $32, %esp
 ; X86-BMI2-NEXT:    .cfi_def_cfa_offset 16
-; X86-BMI2-NEXT:    popl %edi
+; X86-BMI2-NEXT:    popl %esi
 ; X86-BMI2-NEXT:    .cfi_def_cfa_offset 12
-; X86-BMI2-NEXT:    popl %ebx
+; X86-BMI2-NEXT:    popl %edi
 ; X86-BMI2-NEXT:    .cfi_def_cfa_offset 8
-; X86-BMI2-NEXT:    popl %ebp
+; X86-BMI2-NEXT:    popl %ebx
 ; X86-BMI2-NEXT:    .cfi_def_cfa_offset 4
 ; X86-BMI2-NEXT:    retl $4
 ;
 ; X64-NOBMI-LABEL: mask_pair_128:
 ; X64-NOBMI:       # %bb.0:
 ; X64-NOBMI-NEXT:    movq %rdx, %rcx
-; X64-NOBMI-NEXT:    shrdq %cl, %rsi, %rdi
-; X64-NOBMI-NEXT:    shrq %cl, %rsi
+; X64-NOBMI-NEXT:    movq $-1, %rdx
+; X64-NOBMI-NEXT:    movq $-1, %r8
+; X64-NOBMI-NEXT:    shlq %cl, %r8
 ; X64-NOBMI-NEXT:    xorl %eax, %eax
 ; X64-NOBMI-NEXT:    testb $64, %cl
-; X64-NOBMI-NEXT:    cmovneq %rsi, %rdi
-; X64-NOBMI-NEXT:    cmovneq %rax, %rsi
-; X64-NOBMI-NEXT:    movq %rdi, %rdx
-; X64-NOBMI-NEXT:    shlq %cl, %rdx
-; X64-NOBMI-NEXT:    testb $64, %cl
-; X64-NOBMI-NEXT:    cmoveq %rdx, %rax
-; X64-NOBMI-NEXT:    shldq %cl, %rdi, %rsi
-; X64-NOBMI-NEXT:    testb $64, %cl
-; X64-NOBMI-NEXT:    cmoveq %rsi, %rdx
+; X64-NOBMI-NEXT:    cmovneq %r8, %rdx
+; X64-NOBMI-NEXT:    cmoveq %r8, %rax
+; X64-NOBMI-NEXT:    andq %rdi, %rax
+; X64-NOBMI-NEXT:    andq %rsi, %rdx
 ; X64-NOBMI-NEXT:    retq
 ;
 ; X64-BMI1-LABEL: mask_pair_128:
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    movq %rdx, %rcx
-; X64-BMI1-NEXT:    shrdq %cl, %rsi, %rdi
-; X64-BMI1-NEXT:    shrq %cl, %rsi
+; X64-BMI1-NEXT:    movq $-1, %rdx
+; X64-BMI1-NEXT:    movq $-1, %r8
+; X64-BMI1-NEXT:    shlq %cl, %r8
 ; X64-BMI1-NEXT:    xorl %eax, %eax
 ; X64-BMI1-NEXT:    testb $64, %cl
-; X64-BMI1-NEXT:    cmovneq %rsi, %rdi
-; X64-BMI1-NEXT:    cmovneq %rax, %rsi
-; X64-BMI1-NEXT:    movq %rdi, %rdx
-; X64-BMI1-NEXT:    shlq %cl, %rdx
-; X64-BMI1-NEXT:    testb $64, %cl
-; X64-BMI1-NEXT:    cmoveq %rdx, %rax
-; X64-BMI1-NEXT:    shldq %cl, %rdi, %rsi
-; X64-BMI1-NEXT:    testb $64, %cl
-; X64-BMI1-NEXT:    cmoveq %rsi, %rdx
+; X64-BMI1-NEXT:    cmovneq %r8, %rdx
+; X64-BMI1-NEXT:    cmoveq %r8, %rax
+; X64-BMI1-NEXT:    andq %rdi, %rax
+; X64-BMI1-NEXT:    andq %rsi, %rdx
 ; X64-BMI1-NEXT:    retq
 ;
 ; X64-BMI2-LABEL: mask_pair_128:
 ; X64-BMI2:       # %bb.0:
-; X64-BMI2-NEXT:    movq %rdx, %rcx
-; X64-BMI2-NEXT:    shrdq %cl, %rsi, %rdi
-; X64-BMI2-NEXT:    shrxq %rdx, %rsi, %rdx
-; X64-BMI2-NEXT:    xorl %esi, %esi
-; X64-BMI2-NEXT:    testb $64, %cl
-; X64-BMI2-NEXT:    cmovneq %rdx, %rdi
-; X64-BMI2-NEXT:    shlxq %rcx, %rdi, %r8
-; X64-BMI2-NEXT:    movq %r8, %rax
-; X64-BMI2-NEXT:    cmovneq %rsi, %rax
-; X64-BMI2-NEXT:    cmovneq %rsi, %rdx
-; X64-BMI2-NEXT:    shldq %cl, %rdi, %rdx
-; X64-BMI2-NEXT:    testb $64, %cl
-; X64-BMI2-NEXT:    cmovneq %r8, %rdx
+; X64-BMI2-NEXT:    movq $-1, %rcx
+; X64-BMI2-NEXT:    shlxq %rdx, %rcx, %r8
+; X64-BMI2-NEXT:    xorl %eax, %eax
+; X64-BMI2-NEXT:    testb $64, %dl
+; X64-BMI2-NEXT:    cmovneq %r8, %rcx
+; X64-BMI2-NEXT:    cmoveq %r8, %rax
+; X64-BMI2-NEXT:    andq %rdi, %rax
+; X64-BMI2-NEXT:    andq %rsi, %rcx
+; X64-BMI2-NEXT:    movq %rcx, %rdx
 ; X64-BMI2-NEXT:    retq
   %shl = shl nsw i128 -1, %y
   %and = and i128 %shl, %x
   ret i128 %and
 }
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; CHECK-GI: {{.*}}
-; CHECK-SD: {{.*}}
 ; X64: {{.*}}
 ; X64-BMINOTBM: {{.*}}
 ; X64-BMITBM: {{.*}}


