[llvm] [ARM][AArch64] shouldFoldMaskToVariableShiftPair should be true (PR #156886)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Sep 4 07:27:40 PDT 2025
https://github.com/AZero13 updated https://github.com/llvm/llvm-project/pull/156886
>From 9df495e1fdebc6141f973fc599f2f802f2e9a27d Mon Sep 17 00:00:00 2001
From: AZero13 <gfunni234 at gmail.com>
Date: Thu, 4 Sep 2025 09:32:45 -0400
Subject: [PATCH 1/2] Pre-commit tests (NFC)
---
.../test/CodeGen/AArch64/and-mask-variable.ll | 30 ++
llvm/test/CodeGen/ARM/and-mask-variable.ll | 94 ++++
llvm/test/CodeGen/X86/and-mask-variable.ll | 450 ++++++++++++++++++
3 files changed, 574 insertions(+)
create mode 100644 llvm/test/CodeGen/AArch64/and-mask-variable.ll
create mode 100644 llvm/test/CodeGen/ARM/and-mask-variable.ll
create mode 100644 llvm/test/CodeGen/X86/and-mask-variable.ll
diff --git a/llvm/test/CodeGen/AArch64/and-mask-variable.ll b/llvm/test/CodeGen/AArch64/and-mask-variable.ll
new file mode 100644
index 0000000000000..a92f3cf5ec092
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/and-mask-variable.ll
@@ -0,0 +1,30 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-none-elf -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-none-elf -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+
+define i32 @mask_pair(i32 %x, i32 %y) {
+; CHECK-LABEL: mask_pair:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #-1 // =0xffffffff
+; CHECK-NEXT: lsl w8, w8, w1
+; CHECK-NEXT: and w0, w8, w0
+; CHECK-NEXT: ret
+ %shl = shl nsw i32 -1, %y
+ %and = and i32 %shl, %x
+ ret i32 %and
+}
+
+define i64 @mask_pair_64(i64 %x, i64 %y) {
+; CHECK-LABEL: mask_pair_64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: lsl x8, x8, x1
+; CHECK-NEXT: and x0, x8, x0
+; CHECK-NEXT: ret
+ %shl = shl nsw i64 -1, %y
+ %and = and i64 %shl, %x
+ ret i64 %and
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK-GI: {{.*}}
+; CHECK-SD: {{.*}}
diff --git a/llvm/test/CodeGen/ARM/and-mask-variable.ll b/llvm/test/CodeGen/ARM/and-mask-variable.ll
new file mode 100644
index 0000000000000..0b57fe278bf6e
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/and-mask-variable.ll
@@ -0,0 +1,94 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv7m-eabi %s -o - | FileCheck %s --check-prefix V7M
+; RUN: llc -mtriple=armv7a-eabi %s -o - | FileCheck %s --check-prefix V7A
+; RUN: llc -mtriple=thumbv7a-eabi %s -o - | FileCheck %s --check-prefix V7A-T
+; RUN: llc -mtriple=armv6m-eabi %s -o - | FileCheck %s --check-prefix V6M
+
+define i32 @mask_pair(i32 %x, i32 %y) {
+; V7M-LABEL: mask_pair:
+; V7M: @ %bb.0:
+; V7M-NEXT: mov.w r2, #-1
+; V7M-NEXT: lsl.w r1, r2, r1
+; V7M-NEXT: ands r0, r1
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: mask_pair:
+; V7A: @ %bb.0:
+; V7A-NEXT: mvn r2, #0
+; V7A-NEXT: and r0, r0, r2, lsl r1
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: mask_pair:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mov.w r2, #-1
+; V7A-T-NEXT: lsl.w r1, r2, r1
+; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: mask_pair:
+; V6M: @ %bb.0:
+; V6M-NEXT: movs r2, #0
+; V6M-NEXT: mvns r2, r2
+; V6M-NEXT: lsls r2, r1
+; V6M-NEXT: ands r0, r2
+; V6M-NEXT: bx lr
+ %shl = shl nsw i32 -1, %y
+ %and = and i32 %shl, %x
+ ret i32 %and
+}
+
+define i64 @mask_pair_64(i64 %x, i64 %y) {
+; V7M-LABEL: mask_pair_64:
+; V7M: @ %bb.0:
+; V7M-NEXT: mov.w r3, #-1
+; V7M-NEXT: lsl.w r12, r3, r2
+; V7M-NEXT: subs r2, #32
+; V7M-NEXT: it pl
+; V7M-NEXT: movpl.w r12, #0
+; V7M-NEXT: it pl
+; V7M-NEXT: lslpl r3, r2
+; V7M-NEXT: and.w r0, r0, r12
+; V7M-NEXT: ands r1, r3
+; V7M-NEXT: bx lr
+;
+; V7A-LABEL: mask_pair_64:
+; V7A: @ %bb.0:
+; V7A-NEXT: subs r12, r2, #32
+; V7A-NEXT: mvn r3, #0
+; V7A-NEXT: lsl r2, r3, r2
+; V7A-NEXT: lslpl r3, r3, r12
+; V7A-NEXT: movwpl r2, #0
+; V7A-NEXT: and r1, r3, r1
+; V7A-NEXT: and r0, r2, r0
+; V7A-NEXT: bx lr
+;
+; V7A-T-LABEL: mask_pair_64:
+; V7A-T: @ %bb.0:
+; V7A-T-NEXT: mov.w r3, #-1
+; V7A-T-NEXT: lsl.w r12, r3, r2
+; V7A-T-NEXT: subs r2, #32
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: movpl.w r12, #0
+; V7A-T-NEXT: it pl
+; V7A-T-NEXT: lslpl r3, r2
+; V7A-T-NEXT: and.w r0, r0, r12
+; V7A-T-NEXT: ands r1, r3
+; V7A-T-NEXT: bx lr
+;
+; V6M-LABEL: mask_pair_64:
+; V6M: @ %bb.0:
+; V6M-NEXT: .save {r4, r5, r7, lr}
+; V6M-NEXT: push {r4, r5, r7, lr}
+; V6M-NEXT: mov r4, r1
+; V6M-NEXT: mov r5, r0
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: mvns r0, r0
+; V6M-NEXT: mov r1, r0
+; V6M-NEXT: bl __aeabi_llsl
+; V6M-NEXT: ands r0, r5
+; V6M-NEXT: ands r1, r4
+; V6M-NEXT: pop {r4, r5, r7, pc}
+ %shl = shl nsw i64 -1, %y
+ %and = and i64 %shl, %x
+ ret i64 %and
+}
diff --git a/llvm/test/CodeGen/X86/and-mask-variable.ll b/llvm/test/CodeGen/X86/and-mask-variable.ll
new file mode 100644
index 0000000000000..54daa86dc6f36
--- /dev/null
+++ b/llvm/test/CodeGen/X86/and-mask-variable.ll
@@ -0,0 +1,450 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X86,X86-NOBMI
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,-tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X86,X86-BMINOTBM,X86-BMI1
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,+tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X86,X86-BMITBM,X86-BMI1
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,+tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X86,X86-BMITBM,X86-BMI2
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,-tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X86,X86-BMINOTBM,X86-BMI2
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X64,X64-NOBMI
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,-tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X64,X64-BMINOTBM,X64-BMI1
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,+tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X64,X64-BMITBM,X64-BMI1
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,+tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X64,X64-BMITBM,X64-BMI2
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,-tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X64,X64-BMINOTBM,X64-BMI2
+
+define i32 @mask_pair(i32 %x, i32 %y) {
+; X86-NOBMI-LABEL: mask_pair:
+; X86-NOBMI: # %bb.0:
+; X86-NOBMI-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT: shrl %cl, %eax
+; X86-NOBMI-NEXT: shll %cl, %eax
+; X86-NOBMI-NEXT: retl
+;
+; X86-BMI1-LABEL: mask_pair:
+; X86-BMI1: # %bb.0:
+; X86-BMI1-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-BMI1-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-BMI1-NEXT: shrl %cl, %eax
+; X86-BMI1-NEXT: shll %cl, %eax
+; X86-BMI1-NEXT: retl
+;
+; X86-BMI2-LABEL: mask_pair:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X86-BMI2-NEXT: shrxl %eax, {{[0-9]+}}(%esp), %ecx
+; X86-BMI2-NEXT: shlxl %eax, %ecx, %eax
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI-LABEL: mask_pair:
+; X64-NOBMI: # %bb.0:
+; X64-NOBMI-NEXT: movl %esi, %ecx
+; X64-NOBMI-NEXT: movl %edi, %eax
+; X64-NOBMI-NEXT: shrl %cl, %eax
+; X64-NOBMI-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NOBMI-NEXT: shll %cl, %eax
+; X64-NOBMI-NEXT: retq
+;
+; X64-BMI1-LABEL: mask_pair:
+; X64-BMI1: # %bb.0:
+; X64-BMI1-NEXT: movl %esi, %ecx
+; X64-BMI1-NEXT: movl %edi, %eax
+; X64-BMI1-NEXT: shrl %cl, %eax
+; X64-BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-BMI1-NEXT: shll %cl, %eax
+; X64-BMI1-NEXT: retq
+;
+; X64-BMI2-LABEL: mask_pair:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: shrxl %esi, %edi, %eax
+; X64-BMI2-NEXT: shlxl %esi, %eax, %eax
+; X64-BMI2-NEXT: retq
+ %shl = shl nsw i32 -1, %y
+ %and = and i32 %shl, %x
+ ret i32 %and
+}
+
+define i64 @mask_pair_64(i64 %x, i64 %y) {
+; X86-NOBMI-LABEL: mask_pair_64:
+; X86-NOBMI: # %bb.0:
+; X86-NOBMI-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NOBMI-NEXT: movl $-1, %edx
+; X86-NOBMI-NEXT: movl $-1, %eax
+; X86-NOBMI-NEXT: shll %cl, %eax
+; X86-NOBMI-NEXT: testb $32, %cl
+; X86-NOBMI-NEXT: je .LBB1_2
+; X86-NOBMI-NEXT: # %bb.1:
+; X86-NOBMI-NEXT: movl %eax, %edx
+; X86-NOBMI-NEXT: xorl %eax, %eax
+; X86-NOBMI-NEXT: .LBB1_2:
+; X86-NOBMI-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT: retl
+;
+; X86-BMI1-LABEL: mask_pair_64:
+; X86-BMI1: # %bb.0:
+; X86-BMI1-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-BMI1-NEXT: movl $-1, %edx
+; X86-BMI1-NEXT: movl $-1, %eax
+; X86-BMI1-NEXT: shll %cl, %eax
+; X86-BMI1-NEXT: testb $32, %cl
+; X86-BMI1-NEXT: je .LBB1_2
+; X86-BMI1-NEXT: # %bb.1:
+; X86-BMI1-NEXT: movl %eax, %edx
+; X86-BMI1-NEXT: xorl %eax, %eax
+; X86-BMI1-NEXT: .LBB1_2:
+; X86-BMI1-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-BMI1-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-BMI1-NEXT: retl
+;
+; X86-BMI2-LABEL: mask_pair_64:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-BMI2-NEXT: movl $-1, %edx
+; X86-BMI2-NEXT: shlxl %ecx, %edx, %eax
+; X86-BMI2-NEXT: testb $32, %cl
+; X86-BMI2-NEXT: je .LBB1_2
+; X86-BMI2-NEXT: # %bb.1:
+; X86-BMI2-NEXT: movl %eax, %edx
+; X86-BMI2-NEXT: xorl %eax, %eax
+; X86-BMI2-NEXT: .LBB1_2:
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-BMI2-NEXT: retl
+;
+; X64-NOBMI-LABEL: mask_pair_64:
+; X64-NOBMI: # %bb.0:
+; X64-NOBMI-NEXT: movq %rsi, %rcx
+; X64-NOBMI-NEXT: movq %rdi, %rax
+; X64-NOBMI-NEXT: shrq %cl, %rax
+; X64-NOBMI-NEXT: # kill: def $cl killed $cl killed $rcx
+; X64-NOBMI-NEXT: shlq %cl, %rax
+; X64-NOBMI-NEXT: retq
+;
+; X64-BMI1-LABEL: mask_pair_64:
+; X64-BMI1: # %bb.0:
+; X64-BMI1-NEXT: movq %rsi, %rcx
+; X64-BMI1-NEXT: movq %rdi, %rax
+; X64-BMI1-NEXT: shrq %cl, %rax
+; X64-BMI1-NEXT: # kill: def $cl killed $cl killed $rcx
+; X64-BMI1-NEXT: shlq %cl, %rax
+; X64-BMI1-NEXT: retq
+;
+; X64-BMI2-LABEL: mask_pair_64:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: shrxq %rsi, %rdi, %rax
+; X64-BMI2-NEXT: shlxq %rsi, %rax, %rax
+; X64-BMI2-NEXT: retq
+ %shl = shl nsw i64 -1, %y
+ %and = and i64 %shl, %x
+ ret i64 %and
+}
+
+define i128 @mask_pair_128(i128 %x, i128 %y) {
+; X86-NOBMI-LABEL: mask_pair_128:
+; X86-NOBMI: # %bb.0:
+; X86-NOBMI-NEXT: pushl %ebp
+; X86-NOBMI-NEXT: .cfi_def_cfa_offset 8
+; X86-NOBMI-NEXT: pushl %ebx
+; X86-NOBMI-NEXT: .cfi_def_cfa_offset 12
+; X86-NOBMI-NEXT: pushl %edi
+; X86-NOBMI-NEXT: .cfi_def_cfa_offset 16
+; X86-NOBMI-NEXT: pushl %esi
+; X86-NOBMI-NEXT: .cfi_def_cfa_offset 20
+; X86-NOBMI-NEXT: subl $76, %esp
+; X86-NOBMI-NEXT: .cfi_def_cfa_offset 96
+; X86-NOBMI-NEXT: .cfi_offset %esi, -20
+; X86-NOBMI-NEXT: .cfi_offset %edi, -16
+; X86-NOBMI-NEXT: .cfi_offset %ebx, -12
+; X86-NOBMI-NEXT: .cfi_offset %ebp, -8
+; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-NOBMI-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT: movl %ecx, %edx
+; X86-NOBMI-NEXT: shrb $3, %dl
+; X86-NOBMI-NEXT: andb $12, %dl
+; X86-NOBMI-NEXT: movzbl %dl, %esi
+; X86-NOBMI-NEXT: movl 44(%esp,%esi), %edi
+; X86-NOBMI-NEXT: movl %edi, %ebx
+; X86-NOBMI-NEXT: shrl %cl, %ebx
+; X86-NOBMI-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT: movl 40(%esp,%esi), %ebx
+; X86-NOBMI-NEXT: movl %ebx, %ebp
+; X86-NOBMI-NEXT: shrdl %cl, %edi, %ebp
+; X86-NOBMI-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT: movl 32(%esp,%esi), %edi
+; X86-NOBMI-NEXT: movl 36(%esp,%esi), %esi
+; X86-NOBMI-NEXT: movl %esi, %ebp
+; X86-NOBMI-NEXT: shrdl %cl, %ebx, %ebp
+; X86-NOBMI-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT: shrdl %cl, %esi, %edi
+; X86-NOBMI-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT: movl $0, (%esp)
+; X86-NOBMI-NEXT: negb %dl
+; X86-NOBMI-NEXT: movsbl %dl, %edi
+; X86-NOBMI-NEXT: movl 16(%esp,%edi), %edx
+; X86-NOBMI-NEXT: movl 20(%esp,%edi), %esi
+; X86-NOBMI-NEXT: movl 24(%esp,%edi), %ebx
+; X86-NOBMI-NEXT: movl %ebx, %ebp
+; X86-NOBMI-NEXT: shldl %cl, %esi, %ebp
+; X86-NOBMI-NEXT: movl 28(%esp,%edi), %edi
+; X86-NOBMI-NEXT: shldl %cl, %ebx, %edi
+; X86-NOBMI-NEXT: movl %edi, 12(%eax)
+; X86-NOBMI-NEXT: movl %ebp, 8(%eax)
+; X86-NOBMI-NEXT: movl %edx, %edi
+; X86-NOBMI-NEXT: shll %cl, %edi
+; X86-NOBMI-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NOBMI-NEXT: shldl %cl, %edx, %esi
+; X86-NOBMI-NEXT: movl %esi, 4(%eax)
+; X86-NOBMI-NEXT: movl %edi, (%eax)
+; X86-NOBMI-NEXT: addl $76, %esp
+; X86-NOBMI-NEXT: .cfi_def_cfa_offset 20
+; X86-NOBMI-NEXT: popl %esi
+; X86-NOBMI-NEXT: .cfi_def_cfa_offset 16
+; X86-NOBMI-NEXT: popl %edi
+; X86-NOBMI-NEXT: .cfi_def_cfa_offset 12
+; X86-NOBMI-NEXT: popl %ebx
+; X86-NOBMI-NEXT: .cfi_def_cfa_offset 8
+; X86-NOBMI-NEXT: popl %ebp
+; X86-NOBMI-NEXT: .cfi_def_cfa_offset 4
+; X86-NOBMI-NEXT: retl $4
+;
+; X86-BMI1-LABEL: mask_pair_128:
+; X86-BMI1: # %bb.0:
+; X86-BMI1-NEXT: pushl %ebp
+; X86-BMI1-NEXT: .cfi_def_cfa_offset 8
+; X86-BMI1-NEXT: pushl %ebx
+; X86-BMI1-NEXT: .cfi_def_cfa_offset 12
+; X86-BMI1-NEXT: pushl %edi
+; X86-BMI1-NEXT: .cfi_def_cfa_offset 16
+; X86-BMI1-NEXT: pushl %esi
+; X86-BMI1-NEXT: .cfi_def_cfa_offset 20
+; X86-BMI1-NEXT: subl $76, %esp
+; X86-BMI1-NEXT: .cfi_def_cfa_offset 96
+; X86-BMI1-NEXT: .cfi_offset %esi, -20
+; X86-BMI1-NEXT: .cfi_offset %edi, -16
+; X86-BMI1-NEXT: .cfi_offset %ebx, -12
+; X86-BMI1-NEXT: .cfi_offset %ebp, -8
+; X86-BMI1-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-BMI1-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-BMI1-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-BMI1-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-BMI1-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-BMI1-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT: movl %ecx, %edx
+; X86-BMI1-NEXT: shrb $3, %dl
+; X86-BMI1-NEXT: andb $12, %dl
+; X86-BMI1-NEXT: movzbl %dl, %esi
+; X86-BMI1-NEXT: movl 44(%esp,%esi), %edi
+; X86-BMI1-NEXT: movl %edi, %ebx
+; X86-BMI1-NEXT: shrl %cl, %ebx
+; X86-BMI1-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT: movl 40(%esp,%esi), %ebx
+; X86-BMI1-NEXT: movl %ebx, %ebp
+; X86-BMI1-NEXT: shrdl %cl, %edi, %ebp
+; X86-BMI1-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT: movl 32(%esp,%esi), %edi
+; X86-BMI1-NEXT: movl 36(%esp,%esi), %esi
+; X86-BMI1-NEXT: movl %esi, %ebp
+; X86-BMI1-NEXT: shrdl %cl, %ebx, %ebp
+; X86-BMI1-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT: shrdl %cl, %esi, %edi
+; X86-BMI1-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT: movl $0, (%esp)
+; X86-BMI1-NEXT: negb %dl
+; X86-BMI1-NEXT: movsbl %dl, %edi
+; X86-BMI1-NEXT: movl 16(%esp,%edi), %edx
+; X86-BMI1-NEXT: movl 20(%esp,%edi), %esi
+; X86-BMI1-NEXT: movl 24(%esp,%edi), %ebx
+; X86-BMI1-NEXT: movl %ebx, %ebp
+; X86-BMI1-NEXT: shldl %cl, %esi, %ebp
+; X86-BMI1-NEXT: movl 28(%esp,%edi), %edi
+; X86-BMI1-NEXT: shldl %cl, %ebx, %edi
+; X86-BMI1-NEXT: movl %edi, 12(%eax)
+; X86-BMI1-NEXT: movl %ebp, 8(%eax)
+; X86-BMI1-NEXT: movl %edx, %edi
+; X86-BMI1-NEXT: shll %cl, %edi
+; X86-BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-BMI1-NEXT: shldl %cl, %edx, %esi
+; X86-BMI1-NEXT: movl %esi, 4(%eax)
+; X86-BMI1-NEXT: movl %edi, (%eax)
+; X86-BMI1-NEXT: addl $76, %esp
+; X86-BMI1-NEXT: .cfi_def_cfa_offset 20
+; X86-BMI1-NEXT: popl %esi
+; X86-BMI1-NEXT: .cfi_def_cfa_offset 16
+; X86-BMI1-NEXT: popl %edi
+; X86-BMI1-NEXT: .cfi_def_cfa_offset 12
+; X86-BMI1-NEXT: popl %ebx
+; X86-BMI1-NEXT: .cfi_def_cfa_offset 8
+; X86-BMI1-NEXT: popl %ebp
+; X86-BMI1-NEXT: .cfi_def_cfa_offset 4
+; X86-BMI1-NEXT: retl $4
+;
+; X86-BMI2-LABEL: mask_pair_128:
+; X86-BMI2: # %bb.0:
+; X86-BMI2-NEXT: pushl %ebp
+; X86-BMI2-NEXT: .cfi_def_cfa_offset 8
+; X86-BMI2-NEXT: pushl %ebx
+; X86-BMI2-NEXT: .cfi_def_cfa_offset 12
+; X86-BMI2-NEXT: pushl %edi
+; X86-BMI2-NEXT: .cfi_def_cfa_offset 16
+; X86-BMI2-NEXT: pushl %esi
+; X86-BMI2-NEXT: .cfi_def_cfa_offset 20
+; X86-BMI2-NEXT: subl $76, %esp
+; X86-BMI2-NEXT: .cfi_def_cfa_offset 96
+; X86-BMI2-NEXT: .cfi_offset %esi, -20
+; X86-BMI2-NEXT: .cfi_offset %edi, -16
+; X86-BMI2-NEXT: .cfi_offset %ebx, -12
+; X86-BMI2-NEXT: .cfi_offset %ebp, -8
+; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-BMI2-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT: movl %ecx, %edx
+; X86-BMI2-NEXT: shrb $3, %dl
+; X86-BMI2-NEXT: andb $12, %dl
+; X86-BMI2-NEXT: movzbl %dl, %esi
+; X86-BMI2-NEXT: movl 44(%esp,%esi), %edi
+; X86-BMI2-NEXT: shrxl %ecx, %edi, %ebx
+; X86-BMI2-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT: movl 40(%esp,%esi), %ebx
+; X86-BMI2-NEXT: movl %ebx, %ebp
+; X86-BMI2-NEXT: shrdl %cl, %edi, %ebp
+; X86-BMI2-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT: movl 32(%esp,%esi), %edi
+; X86-BMI2-NEXT: movl 36(%esp,%esi), %esi
+; X86-BMI2-NEXT: movl %esi, %ebp
+; X86-BMI2-NEXT: shrdl %cl, %ebx, %ebp
+; X86-BMI2-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT: shrdl %cl, %esi, %edi
+; X86-BMI2-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT: movl $0, (%esp)
+; X86-BMI2-NEXT: negb %dl
+; X86-BMI2-NEXT: movsbl %dl, %edi
+; X86-BMI2-NEXT: movl 16(%esp,%edi), %edx
+; X86-BMI2-NEXT: movl 20(%esp,%edi), %esi
+; X86-BMI2-NEXT: movl 24(%esp,%edi), %ebx
+; X86-BMI2-NEXT: movl %ebx, %ebp
+; X86-BMI2-NEXT: shldl %cl, %esi, %ebp
+; X86-BMI2-NEXT: movl 28(%esp,%edi), %edi
+; X86-BMI2-NEXT: shldl %cl, %ebx, %edi
+; X86-BMI2-NEXT: movl %edi, 12(%eax)
+; X86-BMI2-NEXT: movl %ebp, 8(%eax)
+; X86-BMI2-NEXT: shlxl %ecx, %edx, %edi
+; X86-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-BMI2-NEXT: shldl %cl, %edx, %esi
+; X86-BMI2-NEXT: movl %esi, 4(%eax)
+; X86-BMI2-NEXT: movl %edi, (%eax)
+; X86-BMI2-NEXT: addl $76, %esp
+; X86-BMI2-NEXT: .cfi_def_cfa_offset 20
+; X86-BMI2-NEXT: popl %esi
+; X86-BMI2-NEXT: .cfi_def_cfa_offset 16
+; X86-BMI2-NEXT: popl %edi
+; X86-BMI2-NEXT: .cfi_def_cfa_offset 12
+; X86-BMI2-NEXT: popl %ebx
+; X86-BMI2-NEXT: .cfi_def_cfa_offset 8
+; X86-BMI2-NEXT: popl %ebp
+; X86-BMI2-NEXT: .cfi_def_cfa_offset 4
+; X86-BMI2-NEXT: retl $4
+;
+; X64-NOBMI-LABEL: mask_pair_128:
+; X64-NOBMI: # %bb.0:
+; X64-NOBMI-NEXT: movq %rdx, %rcx
+; X64-NOBMI-NEXT: shrdq %cl, %rsi, %rdi
+; X64-NOBMI-NEXT: shrq %cl, %rsi
+; X64-NOBMI-NEXT: xorl %eax, %eax
+; X64-NOBMI-NEXT: testb $64, %cl
+; X64-NOBMI-NEXT: cmovneq %rsi, %rdi
+; X64-NOBMI-NEXT: cmovneq %rax, %rsi
+; X64-NOBMI-NEXT: movq %rdi, %rdx
+; X64-NOBMI-NEXT: shlq %cl, %rdx
+; X64-NOBMI-NEXT: testb $64, %cl
+; X64-NOBMI-NEXT: cmoveq %rdx, %rax
+; X64-NOBMI-NEXT: shldq %cl, %rdi, %rsi
+; X64-NOBMI-NEXT: testb $64, %cl
+; X64-NOBMI-NEXT: cmoveq %rsi, %rdx
+; X64-NOBMI-NEXT: retq
+;
+; X64-BMI1-LABEL: mask_pair_128:
+; X64-BMI1: # %bb.0:
+; X64-BMI1-NEXT: movq %rdx, %rcx
+; X64-BMI1-NEXT: shrdq %cl, %rsi, %rdi
+; X64-BMI1-NEXT: shrq %cl, %rsi
+; X64-BMI1-NEXT: xorl %eax, %eax
+; X64-BMI1-NEXT: testb $64, %cl
+; X64-BMI1-NEXT: cmovneq %rsi, %rdi
+; X64-BMI1-NEXT: cmovneq %rax, %rsi
+; X64-BMI1-NEXT: movq %rdi, %rdx
+; X64-BMI1-NEXT: shlq %cl, %rdx
+; X64-BMI1-NEXT: testb $64, %cl
+; X64-BMI1-NEXT: cmoveq %rdx, %rax
+; X64-BMI1-NEXT: shldq %cl, %rdi, %rsi
+; X64-BMI1-NEXT: testb $64, %cl
+; X64-BMI1-NEXT: cmoveq %rsi, %rdx
+; X64-BMI1-NEXT: retq
+;
+; X64-BMI2-LABEL: mask_pair_128:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movq %rdx, %rcx
+; X64-BMI2-NEXT: shrdq %cl, %rsi, %rdi
+; X64-BMI2-NEXT: shrxq %rdx, %rsi, %rdx
+; X64-BMI2-NEXT: xorl %esi, %esi
+; X64-BMI2-NEXT: testb $64, %cl
+; X64-BMI2-NEXT: cmovneq %rdx, %rdi
+; X64-BMI2-NEXT: shlxq %rcx, %rdi, %r8
+; X64-BMI2-NEXT: movq %r8, %rax
+; X64-BMI2-NEXT: cmovneq %rsi, %rax
+; X64-BMI2-NEXT: cmovneq %rsi, %rdx
+; X64-BMI2-NEXT: shldq %cl, %rdi, %rdx
+; X64-BMI2-NEXT: testb $64, %cl
+; X64-BMI2-NEXT: cmovneq %r8, %rdx
+; X64-BMI2-NEXT: retq
+ %shl = shl nsw i128 -1, %y
+ %and = and i128 %shl, %x
+ ret i128 %and
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK-GI: {{.*}}
+; CHECK-SD: {{.*}}
+; X64: {{.*}}
+; X64-BMINOTBM: {{.*}}
+; X64-BMITBM: {{.*}}
+; X86: {{.*}}
+; X86-BMINOTBM: {{.*}}
+; X86-BMITBM: {{.*}}
>From c378f963053c2eff276b43512f283b385d9426f1 Mon Sep 17 00:00:00 2001
From: AZero13 <gfunni234 at gmail.com>
Date: Thu, 4 Sep 2025 09:54:59 -0400
Subject: [PATCH 2/2] [X86][ARM][AArch64] shouldFoldMaskToVariableShiftPair
should be true for legal types
For ARM, AArch64, and X86, we want to do this only for legal, non-vector types.
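The combine this hook gates rewrites the canonical mask pattern into a
shift pair:
  Mask:   x & (-1 << y)   (the instcombine canonical form)
  Shifts: (x >> y) << y
As a minimal standalone sketch of the underlying identity (not part of
this patch; the helper names are made up for illustration), the two
forms agree for every in-range shift amount:

  #include <cassert>
  #include <cstdint>

  // Mask form: build an all-ones value, shift it up, then mask.
  static uint32_t maskForm(uint32_t X, unsigned Y) {
    return X & (~uint32_t{0} << Y);
  }

  // Shift-pair form: shift the low bits out, then shift back in zeros.
  static uint32_t shiftPairForm(uint32_t X, unsigned Y) {
    return (X >> Y) << Y;
  }

  int main() {
    for (unsigned Y = 0; Y < 32; ++Y)
      assert(maskForm(0xDEADBEEFu, Y) == shiftPairForm(0xDEADBEEFu, Y));
    return 0;
  }

The hook only decides profitability; the rewrite itself is correct for
any in-range shift amount, as the loop above checks.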
---
llvm/lib/Target/AArch64/AArch64ISelLowering.h | 10 +
llvm/lib/Target/ARM/ARMISelLowering.h | 5 +
llvm/lib/Target/X86/X86ISelLowering.cpp | 7 +-
.../test/CodeGen/AArch64/and-mask-variable.ll | 78 +++-
llvm/test/CodeGen/ARM/and-mask-variable.ll | 20 +-
llvm/test/CodeGen/X86/and-mask-variable.ll | 336 +++++++-----------
6 files changed, 209 insertions(+), 247 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 46738365080f9..9bc1725fc4c4b 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -300,6 +300,16 @@ class AArch64TargetLowering : public TargetLowering {
bool shouldFoldConstantShiftPairToMask(const SDNode *N,
CombineLevel Level) const override;
+  /// Return true if it is profitable to fold a mask to a variable shift pair.
+ bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override {
+ EVT VT = Y.getValueType();
+
+    if (VT.isVector())
+      return false;
+
+ return isTypeLegal(VT);
+ }
+
bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT,
unsigned SelectOpcode, SDValue X,
SDValue Y) const override;
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index 196ecb1b9f678..5a3baafb57b01 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -773,6 +773,11 @@ class VectorType;
bool shouldFoldConstantShiftPairToMask(const SDNode *N,
CombineLevel Level) const override;
+    /// Return true if it is profitable to fold a mask to a variable shift pair.
+ bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override {
+ return isTypeLegal(Y.getValueType());
+ }
+
bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT,
unsigned SelectOpcode, SDValue X,
SDValue Y) const override;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 08ae0d52d795e..1fd8b0e3d4b5e 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -3663,11 +3663,8 @@ bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const {
if (VT.isVector())
return false;
- // 64-bit shifts on 32-bit targets produce really bad bloated code.
- if (VT == MVT::i64 && !Subtarget.is64Bit())
- return false;
-
- return true;
+  // Shifts on non-legal types expand into really bad, bloated code.
+ return isTypeLegal(VT);
}
TargetLowering::ShiftLegalizationStrategy
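Taken together, the AArch64 and X86 overrides reduce to the same guard
(the ARM override keeps only the legality check). A consolidated sketch
of the shape, not verbatim from any one file:

  // Decline vectors, and only fire when the type is legal, so the
  // resulting shift pair never needs multi-register expansion
  // (e.g. i64 on 32-bit x86, or i128 on any of these targets).
  bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override {
    EVT VT = Y.getValueType();
    if (VT.isVector())
      return false;
    return isTypeLegal(VT);
  }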
diff --git a/llvm/test/CodeGen/AArch64/and-mask-variable.ll b/llvm/test/CodeGen/AArch64/and-mask-variable.ll
index a92f3cf5ec092..891d70772845b 100644
--- a/llvm/test/CodeGen/AArch64/and-mask-variable.ll
+++ b/llvm/test/CodeGen/AArch64/and-mask-variable.ll
@@ -3,28 +3,76 @@
; RUN: llc -mtriple=aarch64-none-elf -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define i32 @mask_pair(i32 %x, i32 %y) {
-; CHECK-LABEL: mask_pair:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #-1 // =0xffffffff
-; CHECK-NEXT: lsl w8, w8, w1
-; CHECK-NEXT: and w0, w8, w0
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: mask_pair:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsr w8, w0, w1
+; CHECK-SD-NEXT: lsl w0, w8, w1
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: mask_pair:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-1 // =0xffffffff
+; CHECK-GI-NEXT: lsl w8, w8, w1
+; CHECK-GI-NEXT: and w0, w8, w0
+; CHECK-GI-NEXT: ret
%shl = shl nsw i32 -1, %y
%and = and i32 %shl, %x
ret i32 %and
}
define i64 @mask_pair_64(i64 %x, i64 %y) {
-; CHECK-LABEL: mask_pair_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: lsl x8, x8, x1
-; CHECK-NEXT: and x0, x8, x0
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: mask_pair_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsr x8, x0, x1
+; CHECK-SD-NEXT: lsl x0, x8, x1
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: mask_pair_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-1 // =0xffffffffffffffff
+; CHECK-GI-NEXT: lsl x8, x8, x1
+; CHECK-GI-NEXT: and x0, x8, x0
+; CHECK-GI-NEXT: ret
%shl = shl nsw i64 -1, %y
%and = and i64 %shl, %x
ret i64 %and
}
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; CHECK-GI: {{.*}}
-; CHECK-SD: {{.*}}
+
+define i128 @mask_pair_128(i128 %x, i128 %y) {
+; CHECK-SD-LABEL: mask_pair_128:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: mov x8, #-1 // =0xffffffffffffffff
+; CHECK-SD-NEXT: mvn w9, w2
+; CHECK-SD-NEXT: mov x10, #9223372036854775807 // =0x7fffffffffffffff
+; CHECK-SD-NEXT: lsl x8, x8, x2
+; CHECK-SD-NEXT: lsr x9, x10, x9
+; CHECK-SD-NEXT: tst x2, #0x40
+; CHECK-SD-NEXT: orr x9, x8, x9
+; CHECK-SD-NEXT: csel x9, x8, x9, ne
+; CHECK-SD-NEXT: csel x8, xzr, x8, ne
+; CHECK-SD-NEXT: and x0, x8, x0
+; CHECK-SD-NEXT: and x1, x9, x1
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: mask_pair_128:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #64 // =0x40
+; CHECK-GI-NEXT: mov x9, #-1 // =0xffffffffffffffff
+; CHECK-GI-NEXT: sub x10, x2, #64
+; CHECK-GI-NEXT: sub x8, x8, x2
+; CHECK-GI-NEXT: lsl x11, x9, x2
+; CHECK-GI-NEXT: cmp x2, #64
+; CHECK-GI-NEXT: lsr x8, x9, x8
+; CHECK-GI-NEXT: lsl x9, x9, x10
+; CHECK-GI-NEXT: csel x10, x11, xzr, lo
+; CHECK-GI-NEXT: orr x8, x8, x11
+; CHECK-GI-NEXT: and x0, x10, x0
+; CHECK-GI-NEXT: csel x8, x8, x9, lo
+; CHECK-GI-NEXT: cmp x2, #0
+; CHECK-GI-NEXT: csinv x8, x8, xzr, ne
+; CHECK-GI-NEXT: and x1, x8, x1
+; CHECK-GI-NEXT: ret
+ %shl = shl nsw i128 -1, %y
+ %and = and i128 %shl, %x
+ ret i128 %and
+}
diff --git a/llvm/test/CodeGen/ARM/and-mask-variable.ll b/llvm/test/CodeGen/ARM/and-mask-variable.ll
index 0b57fe278bf6e..0f84b76f97a6b 100644
--- a/llvm/test/CodeGen/ARM/and-mask-variable.ll
+++ b/llvm/test/CodeGen/ARM/and-mask-variable.ll
@@ -7,30 +7,26 @@
define i32 @mask_pair(i32 %x, i32 %y) {
; V7M-LABEL: mask_pair:
; V7M: @ %bb.0:
-; V7M-NEXT: mov.w r2, #-1
-; V7M-NEXT: lsl.w r1, r2, r1
-; V7M-NEXT: ands r0, r1
+; V7M-NEXT: lsrs r0, r1
+; V7M-NEXT: lsls r0, r1
; V7M-NEXT: bx lr
;
; V7A-LABEL: mask_pair:
; V7A: @ %bb.0:
-; V7A-NEXT: mvn r2, #0
-; V7A-NEXT: and r0, r0, r2, lsl r1
+; V7A-NEXT: lsr r0, r0, r1
+; V7A-NEXT: lsl r0, r0, r1
; V7A-NEXT: bx lr
;
; V7A-T-LABEL: mask_pair:
; V7A-T: @ %bb.0:
-; V7A-T-NEXT: mov.w r2, #-1
-; V7A-T-NEXT: lsl.w r1, r2, r1
-; V7A-T-NEXT: ands r0, r1
+; V7A-T-NEXT: lsrs r0, r1
+; V7A-T-NEXT: lsls r0, r1
; V7A-T-NEXT: bx lr
;
; V6M-LABEL: mask_pair:
; V6M: @ %bb.0:
-; V6M-NEXT: movs r2, #0
-; V6M-NEXT: mvns r2, r2
-; V6M-NEXT: lsls r2, r1
-; V6M-NEXT: ands r0, r2
+; V6M-NEXT: lsrs r0, r1
+; V6M-NEXT: lsls r0, r1
; V6M-NEXT: bx lr
%shl = shl nsw i32 -1, %y
%and = and i32 %shl, %x
diff --git a/llvm/test/CodeGen/X86/and-mask-variable.ll b/llvm/test/CodeGen/X86/and-mask-variable.ll
index 54daa86dc6f36..844a413391d75 100644
--- a/llvm/test/CodeGen/X86/and-mask-variable.ll
+++ b/llvm/test/CodeGen/X86/and-mask-variable.ll
@@ -141,307 +141,213 @@ define i64 @mask_pair_64(i64 %x, i64 %y) {
define i128 @mask_pair_128(i128 %x, i128 %y) {
; X86-NOBMI-LABEL: mask_pair_128:
; X86-NOBMI: # %bb.0:
-; X86-NOBMI-NEXT: pushl %ebp
-; X86-NOBMI-NEXT: .cfi_def_cfa_offset 8
; X86-NOBMI-NEXT: pushl %ebx
-; X86-NOBMI-NEXT: .cfi_def_cfa_offset 12
+; X86-NOBMI-NEXT: .cfi_def_cfa_offset 8
; X86-NOBMI-NEXT: pushl %edi
-; X86-NOBMI-NEXT: .cfi_def_cfa_offset 16
+; X86-NOBMI-NEXT: .cfi_def_cfa_offset 12
; X86-NOBMI-NEXT: pushl %esi
-; X86-NOBMI-NEXT: .cfi_def_cfa_offset 20
-; X86-NOBMI-NEXT: subl $76, %esp
-; X86-NOBMI-NEXT: .cfi_def_cfa_offset 96
-; X86-NOBMI-NEXT: .cfi_offset %esi, -20
-; X86-NOBMI-NEXT: .cfi_offset %edi, -16
-; X86-NOBMI-NEXT: .cfi_offset %ebx, -12
-; X86-NOBMI-NEXT: .cfi_offset %ebp, -8
+; X86-NOBMI-NEXT: .cfi_def_cfa_offset 16
+; X86-NOBMI-NEXT: subl $32, %esp
+; X86-NOBMI-NEXT: .cfi_def_cfa_offset 48
+; X86-NOBMI-NEXT: .cfi_offset %esi, -16
+; X86-NOBMI-NEXT: .cfi_offset %edi, -12
+; X86-NOBMI-NEXT: .cfi_offset %ebx, -8
; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X86-NOBMI-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; X86-NOBMI-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-NOBMI-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-NOBMI-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-NOBMI-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT: movl $-1, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT: movl $-1, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT: movl $-1, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT: movl $-1, {{[0-9]+}}(%esp)
; X86-NOBMI-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NOBMI-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NOBMI-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT: movl $0, (%esp)
; X86-NOBMI-NEXT: movl %ecx, %edx
; X86-NOBMI-NEXT: shrb $3, %dl
; X86-NOBMI-NEXT: andb $12, %dl
-; X86-NOBMI-NEXT: movzbl %dl, %esi
-; X86-NOBMI-NEXT: movl 44(%esp,%esi), %edi
-; X86-NOBMI-NEXT: movl %edi, %ebx
-; X86-NOBMI-NEXT: shrl %cl, %ebx
-; X86-NOBMI-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; X86-NOBMI-NEXT: movl 40(%esp,%esi), %ebx
-; X86-NOBMI-NEXT: movl %ebx, %ebp
-; X86-NOBMI-NEXT: shrdl %cl, %edi, %ebp
-; X86-NOBMI-NEXT: movl %ebp, {{[0-9]+}}(%esp)
-; X86-NOBMI-NEXT: movl 32(%esp,%esi), %edi
-; X86-NOBMI-NEXT: movl 36(%esp,%esi), %esi
-; X86-NOBMI-NEXT: movl %esi, %ebp
-; X86-NOBMI-NEXT: shrdl %cl, %ebx, %ebp
-; X86-NOBMI-NEXT: movl %ebp, {{[0-9]+}}(%esp)
-; X86-NOBMI-NEXT: shrdl %cl, %esi, %edi
-; X86-NOBMI-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-NOBMI-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NOBMI-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NOBMI-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NOBMI-NEXT: movl $0, (%esp)
; X86-NOBMI-NEXT: negb %dl
-; X86-NOBMI-NEXT: movsbl %dl, %edi
-; X86-NOBMI-NEXT: movl 16(%esp,%edi), %edx
-; X86-NOBMI-NEXT: movl 20(%esp,%edi), %esi
-; X86-NOBMI-NEXT: movl 24(%esp,%edi), %ebx
-; X86-NOBMI-NEXT: movl %ebx, %ebp
-; X86-NOBMI-NEXT: shldl %cl, %esi, %ebp
-; X86-NOBMI-NEXT: movl 28(%esp,%edi), %edi
-; X86-NOBMI-NEXT: shldl %cl, %ebx, %edi
-; X86-NOBMI-NEXT: movl %edi, 12(%eax)
-; X86-NOBMI-NEXT: movl %ebp, 8(%eax)
-; X86-NOBMI-NEXT: movl %edx, %edi
-; X86-NOBMI-NEXT: shll %cl, %edi
-; X86-NOBMI-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NOBMI-NEXT: movsbl %dl, %ebx
+; X86-NOBMI-NEXT: movl 24(%esp,%ebx), %edx
+; X86-NOBMI-NEXT: movl 28(%esp,%ebx), %esi
; X86-NOBMI-NEXT: shldl %cl, %edx, %esi
-; X86-NOBMI-NEXT: movl %esi, 4(%eax)
+; X86-NOBMI-NEXT: movl 16(%esp,%ebx), %edi
+; X86-NOBMI-NEXT: movl 20(%esp,%ebx), %ebx
+; X86-NOBMI-NEXT: shldl %cl, %ebx, %edx
+; X86-NOBMI-NEXT: shldl %cl, %edi, %ebx
+; X86-NOBMI-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NOBMI-NEXT: shll %cl, %edi
+; X86-NOBMI-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT: andl {{[0-9]+}}(%esp), %esi
+; X86-NOBMI-NEXT: andl {{[0-9]+}}(%esp), %edi
+; X86-NOBMI-NEXT: andl {{[0-9]+}}(%esp), %ebx
+; X86-NOBMI-NEXT: movl %esi, 12(%eax)
+; X86-NOBMI-NEXT: movl %edx, 8(%eax)
+; X86-NOBMI-NEXT: movl %ebx, 4(%eax)
; X86-NOBMI-NEXT: movl %edi, (%eax)
-; X86-NOBMI-NEXT: addl $76, %esp
-; X86-NOBMI-NEXT: .cfi_def_cfa_offset 20
-; X86-NOBMI-NEXT: popl %esi
+; X86-NOBMI-NEXT: addl $32, %esp
; X86-NOBMI-NEXT: .cfi_def_cfa_offset 16
-; X86-NOBMI-NEXT: popl %edi
+; X86-NOBMI-NEXT: popl %esi
; X86-NOBMI-NEXT: .cfi_def_cfa_offset 12
-; X86-NOBMI-NEXT: popl %ebx
+; X86-NOBMI-NEXT: popl %edi
; X86-NOBMI-NEXT: .cfi_def_cfa_offset 8
-; X86-NOBMI-NEXT: popl %ebp
+; X86-NOBMI-NEXT: popl %ebx
; X86-NOBMI-NEXT: .cfi_def_cfa_offset 4
; X86-NOBMI-NEXT: retl $4
;
; X86-BMI1-LABEL: mask_pair_128:
; X86-BMI1: # %bb.0:
-; X86-BMI1-NEXT: pushl %ebp
-; X86-BMI1-NEXT: .cfi_def_cfa_offset 8
; X86-BMI1-NEXT: pushl %ebx
-; X86-BMI1-NEXT: .cfi_def_cfa_offset 12
+; X86-BMI1-NEXT: .cfi_def_cfa_offset 8
; X86-BMI1-NEXT: pushl %edi
-; X86-BMI1-NEXT: .cfi_def_cfa_offset 16
+; X86-BMI1-NEXT: .cfi_def_cfa_offset 12
; X86-BMI1-NEXT: pushl %esi
-; X86-BMI1-NEXT: .cfi_def_cfa_offset 20
-; X86-BMI1-NEXT: subl $76, %esp
-; X86-BMI1-NEXT: .cfi_def_cfa_offset 96
-; X86-BMI1-NEXT: .cfi_offset %esi, -20
-; X86-BMI1-NEXT: .cfi_offset %edi, -16
-; X86-BMI1-NEXT: .cfi_offset %ebx, -12
-; X86-BMI1-NEXT: .cfi_offset %ebp, -8
+; X86-BMI1-NEXT: .cfi_def_cfa_offset 16
+; X86-BMI1-NEXT: subl $32, %esp
+; X86-BMI1-NEXT: .cfi_def_cfa_offset 48
+; X86-BMI1-NEXT: .cfi_offset %esi, -16
+; X86-BMI1-NEXT: .cfi_offset %edi, -12
+; X86-BMI1-NEXT: .cfi_offset %ebx, -8
; X86-BMI1-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-BMI1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-BMI1-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-BMI1-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-BMI1-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-BMI1-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X86-BMI1-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; X86-BMI1-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-BMI1-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-BMI1-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-BMI1-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT: movl $-1, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT: movl $-1, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT: movl $-1, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT: movl $-1, {{[0-9]+}}(%esp)
; X86-BMI1-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-BMI1-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-BMI1-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-BMI1-NEXT: movl $0, (%esp)
; X86-BMI1-NEXT: movl %ecx, %edx
; X86-BMI1-NEXT: shrb $3, %dl
; X86-BMI1-NEXT: andb $12, %dl
-; X86-BMI1-NEXT: movzbl %dl, %esi
-; X86-BMI1-NEXT: movl 44(%esp,%esi), %edi
-; X86-BMI1-NEXT: movl %edi, %ebx
-; X86-BMI1-NEXT: shrl %cl, %ebx
-; X86-BMI1-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; X86-BMI1-NEXT: movl 40(%esp,%esi), %ebx
-; X86-BMI1-NEXT: movl %ebx, %ebp
-; X86-BMI1-NEXT: shrdl %cl, %edi, %ebp
-; X86-BMI1-NEXT: movl %ebp, {{[0-9]+}}(%esp)
-; X86-BMI1-NEXT: movl 32(%esp,%esi), %edi
-; X86-BMI1-NEXT: movl 36(%esp,%esi), %esi
-; X86-BMI1-NEXT: movl %esi, %ebp
-; X86-BMI1-NEXT: shrdl %cl, %ebx, %ebp
-; X86-BMI1-NEXT: movl %ebp, {{[0-9]+}}(%esp)
-; X86-BMI1-NEXT: shrdl %cl, %esi, %edi
-; X86-BMI1-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-BMI1-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-BMI1-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-BMI1-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-BMI1-NEXT: movl $0, (%esp)
; X86-BMI1-NEXT: negb %dl
-; X86-BMI1-NEXT: movsbl %dl, %edi
-; X86-BMI1-NEXT: movl 16(%esp,%edi), %edx
-; X86-BMI1-NEXT: movl 20(%esp,%edi), %esi
-; X86-BMI1-NEXT: movl 24(%esp,%edi), %ebx
-; X86-BMI1-NEXT: movl %ebx, %ebp
-; X86-BMI1-NEXT: shldl %cl, %esi, %ebp
-; X86-BMI1-NEXT: movl 28(%esp,%edi), %edi
-; X86-BMI1-NEXT: shldl %cl, %ebx, %edi
-; X86-BMI1-NEXT: movl %edi, 12(%eax)
-; X86-BMI1-NEXT: movl %ebp, 8(%eax)
-; X86-BMI1-NEXT: movl %edx, %edi
-; X86-BMI1-NEXT: shll %cl, %edi
-; X86-BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-BMI1-NEXT: movsbl %dl, %ebx
+; X86-BMI1-NEXT: movl 24(%esp,%ebx), %edx
+; X86-BMI1-NEXT: movl 28(%esp,%ebx), %esi
; X86-BMI1-NEXT: shldl %cl, %edx, %esi
-; X86-BMI1-NEXT: movl %esi, 4(%eax)
+; X86-BMI1-NEXT: movl 16(%esp,%ebx), %edi
+; X86-BMI1-NEXT: movl 20(%esp,%ebx), %ebx
+; X86-BMI1-NEXT: shldl %cl, %ebx, %edx
+; X86-BMI1-NEXT: shldl %cl, %edi, %ebx
+; X86-BMI1-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-BMI1-NEXT: shll %cl, %edi
+; X86-BMI1-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-BMI1-NEXT: andl {{[0-9]+}}(%esp), %esi
+; X86-BMI1-NEXT: andl {{[0-9]+}}(%esp), %edi
+; X86-BMI1-NEXT: andl {{[0-9]+}}(%esp), %ebx
+; X86-BMI1-NEXT: movl %esi, 12(%eax)
+; X86-BMI1-NEXT: movl %edx, 8(%eax)
+; X86-BMI1-NEXT: movl %ebx, 4(%eax)
; X86-BMI1-NEXT: movl %edi, (%eax)
-; X86-BMI1-NEXT: addl $76, %esp
-; X86-BMI1-NEXT: .cfi_def_cfa_offset 20
-; X86-BMI1-NEXT: popl %esi
+; X86-BMI1-NEXT: addl $32, %esp
; X86-BMI1-NEXT: .cfi_def_cfa_offset 16
-; X86-BMI1-NEXT: popl %edi
+; X86-BMI1-NEXT: popl %esi
; X86-BMI1-NEXT: .cfi_def_cfa_offset 12
-; X86-BMI1-NEXT: popl %ebx
+; X86-BMI1-NEXT: popl %edi
; X86-BMI1-NEXT: .cfi_def_cfa_offset 8
-; X86-BMI1-NEXT: popl %ebp
+; X86-BMI1-NEXT: popl %ebx
; X86-BMI1-NEXT: .cfi_def_cfa_offset 4
; X86-BMI1-NEXT: retl $4
;
; X86-BMI2-LABEL: mask_pair_128:
; X86-BMI2: # %bb.0:
-; X86-BMI2-NEXT: pushl %ebp
-; X86-BMI2-NEXT: .cfi_def_cfa_offset 8
; X86-BMI2-NEXT: pushl %ebx
-; X86-BMI2-NEXT: .cfi_def_cfa_offset 12
+; X86-BMI2-NEXT: .cfi_def_cfa_offset 8
; X86-BMI2-NEXT: pushl %edi
-; X86-BMI2-NEXT: .cfi_def_cfa_offset 16
+; X86-BMI2-NEXT: .cfi_def_cfa_offset 12
; X86-BMI2-NEXT: pushl %esi
-; X86-BMI2-NEXT: .cfi_def_cfa_offset 20
-; X86-BMI2-NEXT: subl $76, %esp
-; X86-BMI2-NEXT: .cfi_def_cfa_offset 96
-; X86-BMI2-NEXT: .cfi_offset %esi, -20
-; X86-BMI2-NEXT: .cfi_offset %edi, -16
-; X86-BMI2-NEXT: .cfi_offset %ebx, -12
-; X86-BMI2-NEXT: .cfi_offset %ebp, -8
+; X86-BMI2-NEXT: .cfi_def_cfa_offset 16
+; X86-BMI2-NEXT: subl $32, %esp
+; X86-BMI2-NEXT: .cfi_def_cfa_offset 48
+; X86-BMI2-NEXT: .cfi_offset %esi, -16
+; X86-BMI2-NEXT: .cfi_offset %edi, -12
+; X86-BMI2-NEXT: .cfi_offset %ebx, -8
; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X86-BMI2-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; X86-BMI2-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-BMI2-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-BMI2-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-BMI2-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT: movl $-1, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT: movl $-1, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT: movl $-1, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT: movl $-1, {{[0-9]+}}(%esp)
; X86-BMI2-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-BMI2-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-BMI2-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-BMI2-NEXT: movl $0, (%esp)
; X86-BMI2-NEXT: movl %ecx, %edx
; X86-BMI2-NEXT: shrb $3, %dl
; X86-BMI2-NEXT: andb $12, %dl
-; X86-BMI2-NEXT: movzbl %dl, %esi
-; X86-BMI2-NEXT: movl 44(%esp,%esi), %edi
-; X86-BMI2-NEXT: shrxl %ecx, %edi, %ebx
-; X86-BMI2-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; X86-BMI2-NEXT: movl 40(%esp,%esi), %ebx
-; X86-BMI2-NEXT: movl %ebx, %ebp
-; X86-BMI2-NEXT: shrdl %cl, %edi, %ebp
-; X86-BMI2-NEXT: movl %ebp, {{[0-9]+}}(%esp)
-; X86-BMI2-NEXT: movl 32(%esp,%esi), %edi
-; X86-BMI2-NEXT: movl 36(%esp,%esi), %esi
-; X86-BMI2-NEXT: movl %esi, %ebp
-; X86-BMI2-NEXT: shrdl %cl, %ebx, %ebp
-; X86-BMI2-NEXT: movl %ebp, {{[0-9]+}}(%esp)
-; X86-BMI2-NEXT: shrdl %cl, %esi, %edi
-; X86-BMI2-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-BMI2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-BMI2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-BMI2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-BMI2-NEXT: movl $0, (%esp)
; X86-BMI2-NEXT: negb %dl
; X86-BMI2-NEXT: movsbl %dl, %edi
-; X86-BMI2-NEXT: movl 16(%esp,%edi), %edx
-; X86-BMI2-NEXT: movl 20(%esp,%edi), %esi
-; X86-BMI2-NEXT: movl 24(%esp,%edi), %ebx
-; X86-BMI2-NEXT: movl %ebx, %ebp
-; X86-BMI2-NEXT: shldl %cl, %esi, %ebp
-; X86-BMI2-NEXT: movl 28(%esp,%edi), %edi
-; X86-BMI2-NEXT: shldl %cl, %ebx, %edi
-; X86-BMI2-NEXT: movl %edi, 12(%eax)
-; X86-BMI2-NEXT: movl %ebp, 8(%eax)
-; X86-BMI2-NEXT: shlxl %ecx, %edx, %edi
-; X86-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-BMI2-NEXT: movl 24(%esp,%edi), %edx
+; X86-BMI2-NEXT: movl 28(%esp,%edi), %esi
; X86-BMI2-NEXT: shldl %cl, %edx, %esi
-; X86-BMI2-NEXT: movl %esi, 4(%eax)
-; X86-BMI2-NEXT: movl %edi, (%eax)
-; X86-BMI2-NEXT: addl $76, %esp
-; X86-BMI2-NEXT: .cfi_def_cfa_offset 20
-; X86-BMI2-NEXT: popl %esi
+; X86-BMI2-NEXT: movl 16(%esp,%edi), %ebx
+; X86-BMI2-NEXT: movl 20(%esp,%edi), %edi
+; X86-BMI2-NEXT: shldl %cl, %edi, %edx
+; X86-BMI2-NEXT: shldl %cl, %ebx, %edi
+; X86-BMI2-NEXT: shlxl %ecx, %ebx, %ecx
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %esi
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %ecx
+; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %edi
+; X86-BMI2-NEXT: movl %esi, 12(%eax)
+; X86-BMI2-NEXT: movl %edx, 8(%eax)
+; X86-BMI2-NEXT: movl %edi, 4(%eax)
+; X86-BMI2-NEXT: movl %ecx, (%eax)
+; X86-BMI2-NEXT: addl $32, %esp
; X86-BMI2-NEXT: .cfi_def_cfa_offset 16
-; X86-BMI2-NEXT: popl %edi
+; X86-BMI2-NEXT: popl %esi
; X86-BMI2-NEXT: .cfi_def_cfa_offset 12
-; X86-BMI2-NEXT: popl %ebx
+; X86-BMI2-NEXT: popl %edi
; X86-BMI2-NEXT: .cfi_def_cfa_offset 8
-; X86-BMI2-NEXT: popl %ebp
+; X86-BMI2-NEXT: popl %ebx
; X86-BMI2-NEXT: .cfi_def_cfa_offset 4
; X86-BMI2-NEXT: retl $4
;
; X64-NOBMI-LABEL: mask_pair_128:
; X64-NOBMI: # %bb.0:
; X64-NOBMI-NEXT: movq %rdx, %rcx
-; X64-NOBMI-NEXT: shrdq %cl, %rsi, %rdi
-; X64-NOBMI-NEXT: shrq %cl, %rsi
+; X64-NOBMI-NEXT: movq $-1, %rdx
+; X64-NOBMI-NEXT: movq $-1, %r8
+; X64-NOBMI-NEXT: shlq %cl, %r8
; X64-NOBMI-NEXT: xorl %eax, %eax
; X64-NOBMI-NEXT: testb $64, %cl
-; X64-NOBMI-NEXT: cmovneq %rsi, %rdi
-; X64-NOBMI-NEXT: cmovneq %rax, %rsi
-; X64-NOBMI-NEXT: movq %rdi, %rdx
-; X64-NOBMI-NEXT: shlq %cl, %rdx
-; X64-NOBMI-NEXT: testb $64, %cl
-; X64-NOBMI-NEXT: cmoveq %rdx, %rax
-; X64-NOBMI-NEXT: shldq %cl, %rdi, %rsi
-; X64-NOBMI-NEXT: testb $64, %cl
-; X64-NOBMI-NEXT: cmoveq %rsi, %rdx
+; X64-NOBMI-NEXT: cmovneq %r8, %rdx
+; X64-NOBMI-NEXT: cmoveq %r8, %rax
+; X64-NOBMI-NEXT: andq %rdi, %rax
+; X64-NOBMI-NEXT: andq %rsi, %rdx
; X64-NOBMI-NEXT: retq
;
; X64-BMI1-LABEL: mask_pair_128:
; X64-BMI1: # %bb.0:
; X64-BMI1-NEXT: movq %rdx, %rcx
-; X64-BMI1-NEXT: shrdq %cl, %rsi, %rdi
-; X64-BMI1-NEXT: shrq %cl, %rsi
+; X64-BMI1-NEXT: movq $-1, %rdx
+; X64-BMI1-NEXT: movq $-1, %r8
+; X64-BMI1-NEXT: shlq %cl, %r8
; X64-BMI1-NEXT: xorl %eax, %eax
; X64-BMI1-NEXT: testb $64, %cl
-; X64-BMI1-NEXT: cmovneq %rsi, %rdi
-; X64-BMI1-NEXT: cmovneq %rax, %rsi
-; X64-BMI1-NEXT: movq %rdi, %rdx
-; X64-BMI1-NEXT: shlq %cl, %rdx
-; X64-BMI1-NEXT: testb $64, %cl
-; X64-BMI1-NEXT: cmoveq %rdx, %rax
-; X64-BMI1-NEXT: shldq %cl, %rdi, %rsi
-; X64-BMI1-NEXT: testb $64, %cl
-; X64-BMI1-NEXT: cmoveq %rsi, %rdx
+; X64-BMI1-NEXT: cmovneq %r8, %rdx
+; X64-BMI1-NEXT: cmoveq %r8, %rax
+; X64-BMI1-NEXT: andq %rdi, %rax
+; X64-BMI1-NEXT: andq %rsi, %rdx
; X64-BMI1-NEXT: retq
;
; X64-BMI2-LABEL: mask_pair_128:
; X64-BMI2: # %bb.0:
-; X64-BMI2-NEXT: movq %rdx, %rcx
-; X64-BMI2-NEXT: shrdq %cl, %rsi, %rdi
-; X64-BMI2-NEXT: shrxq %rdx, %rsi, %rdx
-; X64-BMI2-NEXT: xorl %esi, %esi
-; X64-BMI2-NEXT: testb $64, %cl
-; X64-BMI2-NEXT: cmovneq %rdx, %rdi
-; X64-BMI2-NEXT: shlxq %rcx, %rdi, %r8
-; X64-BMI2-NEXT: movq %r8, %rax
-; X64-BMI2-NEXT: cmovneq %rsi, %rax
-; X64-BMI2-NEXT: cmovneq %rsi, %rdx
-; X64-BMI2-NEXT: shldq %cl, %rdi, %rdx
-; X64-BMI2-NEXT: testb $64, %cl
-; X64-BMI2-NEXT: cmovneq %r8, %rdx
+; X64-BMI2-NEXT: movq $-1, %rcx
+; X64-BMI2-NEXT: shlxq %rdx, %rcx, %r8
+; X64-BMI2-NEXT: xorl %eax, %eax
+; X64-BMI2-NEXT: testb $64, %dl
+; X64-BMI2-NEXT: cmovneq %r8, %rcx
+; X64-BMI2-NEXT: cmoveq %r8, %rax
+; X64-BMI2-NEXT: andq %rdi, %rax
+; X64-BMI2-NEXT: andq %rsi, %rcx
+; X64-BMI2-NEXT: movq %rcx, %rdx
; X64-BMI2-NEXT: retq
%shl = shl nsw i128 -1, %y
%and = and i128 %shl, %x
ret i128 %and
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; CHECK-GI: {{.*}}
-; CHECK-SD: {{.*}}
; X64: {{.*}}
; X64-BMINOTBM: {{.*}}
; X64-BMITBM: {{.*}}