[llvm] [X86] Use RORX over SHR imm (PR #77964)

Bryce Wilson via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 12 14:00:12 PST 2024


https://github.com/Bryce-MW updated https://github.com/llvm/llvm-project/pull/77964

>From d8aab1f0f44abe404e1c183198ed93a59e362821 Mon Sep 17 00:00:00 2001
From: Bryce Wilson <bryce at brycemw.ca>
Date: Fri, 12 Jan 2024 11:58:50 -0600
Subject: [PATCH 1/5] [X86] Use RORX over SHR imm

SHRX is preferred over SHR because it does not write flags, but it is only available for variable shifts. If the output of an immediate SHR is truncated, and the shift amount is no greater than the number of bits in the source register minus the number of bits in the result, RORX can be used instead: the bits that wrap around land above the truncation point and are discarded. The most common case would be extracting the top half of a register; I could also see it being used when extracting a byte from a larger register.

I am new to TableGen, so I am sure this is not being done in the best way.

As far as I can tell, RORX has the same performance characteristics as SHR, other than not affecting flags.
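For instance, extracting the high half of a 32-bit register satisfies the condition exactly (the shift of 16 equals 32 - 16). A minimal, hypothetical sketch (function name mine, not from the patch's tests) of code that should benefit:
```c
/* The shift amount 16 is <= 32 - 16, so the bits RORX wraps around
 * land above bit 15 and are discarded by the truncation to 16 bits. */
unsigned short high_half(unsigned int x) {
  /* With -mbmi2 and this patch, this shift can lower to
   * `rorx eax, edi, 16` instead of `mov eax, edi` + `shr eax, 16`,
   * leaving EFLAGS untouched. */
  return (unsigned short)(x >> 16);
}
```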

The following example was my motivation for doing this:
```c
#include <immintrin.h>

unsigned short checksum(const int* data) {
  const int len = 5;
  unsigned out = data[0];
  unsigned int carry = 0;

  #pragma clang loop unroll(enable)
  for (unsigned int i = 1; i < len; i++) {
    out = __builtin_addc(out, data[i], carry, &carry);
  }
  out = __builtin_addcs((unsigned short)out, (unsigned short)(out >> 16), (unsigned short)carry, (unsigned short*)&carry);
  out += carry;
  return ~(unsigned short)out;
}
```
Currently produces:
```asm
checksum:
        mov     ecx, dword ptr [rdi]
        add     ecx, dword ptr [rdi + 4]
        adc     ecx, dword ptr [rdi + 8]
        adc     ecx, dword ptr [rdi + 12]
        adc     ecx, dword ptr [rdi + 16]
        setb    dl
        mov     eax, ecx
        shr     eax, 16
        add     dl, 255
        adc     ax, cx
        adc     ax, 0
        not     eax
        ret
```
With these changes, the `mov`/`shr` pair becomes a single flag-preserving `rorx`, and the `setb`/`add` carry-materialization disappears because the carry flag now survives into the final `adc`s:
```asm
checksum:
        mov     ecx, dword ptr [rdi]
        add     ecx, dword ptr [rdi + 4]
        adc     ecx, dword ptr [rdi + 8]
        adc     ecx, dword ptr [rdi + 12]
        adc     ecx, dword ptr [rdi + 16]
        rorx    eax, ecx, 16
        adc     ax, cx
        adc     ax, 0
        not     eax
        ret
```
---
 llvm/lib/Target/X86/X86InstrShiftRotate.td | 78 ++++++++++++++++++++++
 1 file changed, 78 insertions(+)

diff --git a/llvm/lib/Target/X86/X86InstrShiftRotate.td b/llvm/lib/Target/X86/X86InstrShiftRotate.td
index f951894db1890c..c9e7e1a6eae68b 100644
--- a/llvm/lib/Target/X86/X86InstrShiftRotate.td
+++ b/llvm/lib/Target/X86/X86InstrShiftRotate.td
@@ -879,6 +879,26 @@ let Predicates = [HasBMI2, HasEGPR, In64BitMode] in {
   defm SHLX64 : bmi_shift<"shlx{q}", GR64, i64mem, "_EVEX">, T8, PD, REX_W, EVEX;
 }
 
+
+def immle16_8 : ImmLeaf<i8, [{
+  return Imm <= 16 - 8;
+}]>;
+def immle32_8 : ImmLeaf<i8, [{
+  return Imm <= 32 - 8;
+}]>;
+def immle64_8 : ImmLeaf<i8, [{
+  return Imm <= 64 - 8;
+}]>;
+def immle32_16 : ImmLeaf<i8, [{
+  return Imm <= 32 - 16;
+}]>;
+def immle64_16 : ImmLeaf<i8, [{
+  return Imm <= 64 - 16;
+}]>;
+def immle64_32 : ImmLeaf<i8, [{
+  return Imm <= 64 - 32;
+}]>;
+
 let Predicates = [HasBMI2] in {
   // Prefer RORX which is non-destructive and doesn't update EFLAGS.
   let AddedComplexity = 10 in {
@@ -891,6 +911,64 @@ let Predicates = [HasBMI2] in {
               (RORX32ri GR32:$src, (ROT32L2R_imm8 imm:$shamt))>;
     def : Pat<(rotl GR64:$src, (i8 imm:$shamt)),
               (RORX64ri GR64:$src, (ROT64L2R_imm8 imm:$shamt))>;
+
+    // A right shift by an immediate no greater than (source width -
+    // result width), whose result is truncated to the smaller width,
+    // can be replaced by RORX to preserve flags at the same execution cost.
+
+    def : Pat<(i8 (trunc (srl GR16:$src, (i8 immle16_8:$shamt)))),
+              (EXTRACT_SUBREG (RORX32ri (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit), imm:$shamt), sub_8bit)>;
+    def : Pat<(i8 (trunc (sra GR16:$src, (i8 immle16_8:$shamt)))),
+              (EXTRACT_SUBREG (RORX32ri (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit), imm:$shamt), sub_8bit)>;
+    def : Pat<(i8 (trunc (srl GR32:$src, (i8 immle32_8:$shamt)))),
+              (EXTRACT_SUBREG (RORX32ri GR32:$src, imm:$shamt), sub_8bit)>;
+    def : Pat<(i8 (trunc (sra GR32:$src, (i8 immle32_8:$shamt)))),
+              (EXTRACT_SUBREG (RORX32ri GR32:$src, imm:$shamt), sub_8bit)>;
+    def : Pat<(i8 (trunc (srl GR64:$src, (i8 immle64_8:$shamt)))),
+              (EXTRACT_SUBREG (RORX32ri (EXTRACT_SUBREG GR64:$src, sub_32bit), imm:$shamt), sub_8bit)>;
+    def : Pat<(i8 (trunc (sra GR64:$src, (i8 immle64_8:$shamt)))),
+              (EXTRACT_SUBREG (RORX32ri (EXTRACT_SUBREG GR64:$src, sub_32bit), imm:$shamt), sub_8bit)>;
+
+
+    def : Pat<(i16 (trunc (srl GR32:$src, (i8 immle32_16:$shamt)))),
+              (EXTRACT_SUBREG (RORX32ri GR32:$src, imm:$shamt), sub_16bit)>;
+    def : Pat<(i16 (trunc (sra GR32:$src, (i8 immle32_16:$shamt)))),
+              (EXTRACT_SUBREG (RORX32ri GR32:$src, imm:$shamt), sub_16bit)>;
+    def : Pat<(i16 (trunc (srl GR64:$src, (i8 immle64_16:$shamt)))),
+              (EXTRACT_SUBREG (RORX32ri (EXTRACT_SUBREG GR64:$src, sub_32bit), imm:$shamt), sub_16bit)>;
+    def : Pat<(i16 (trunc (sra GR64:$src, (i8 immle64_16:$shamt)))),
+              (EXTRACT_SUBREG (RORX32ri (EXTRACT_SUBREG GR64:$src, sub_32bit), imm:$shamt), sub_16bit)>;
+
+    def : Pat<(i32 (trunc (srl GR64:$src, (i8 immle64_32:$shamt)))),
+              (EXTRACT_SUBREG (RORX64ri GR64:$src, imm:$shamt), sub_32bit)>;
+    def : Pat<(i32 (trunc (sra GR64:$src, (i8 immle64_32:$shamt)))),
+              (EXTRACT_SUBREG (RORX64ri GR64:$src, imm:$shamt), sub_32bit)>;
+
+
+    // Can't expand the load
+    def : Pat<(i8 (trunc (srl (loadi32 addr:$src), (i8 immle32_8:$shamt)))),
+              (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_8bit)>;
+    def : Pat<(i8 (trunc (sra (loadi32 addr:$src), (i8 immle32_8:$shamt)))),
+              (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_8bit)>;
+    def : Pat<(i8 (trunc (srl (loadi64 addr:$src), (i8 immle64_8:$shamt)))),
+              (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_8bit)>;
+    def : Pat<(i8 (trunc (sra (loadi64 addr:$src), (i8 immle64_8:$shamt)))),
+              (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_8bit)>;
+
+
+    def : Pat<(i16 (trunc (srl (loadi32 addr:$src), (i8 immle32_16:$shamt)))),
+              (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_16bit)>;
+    def : Pat<(i16 (trunc (sra (loadi32 addr:$src), (i8 immle32_16:$shamt)))),
+              (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_16bit)>;
+    def : Pat<(i16 (trunc (srl (loadi64 addr:$src), (i8 immle64_16:$shamt)))),
+              (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_16bit)>;
+    def : Pat<(i16 (trunc (sra (loadi64 addr:$src), (i8 immle64_16:$shamt)))),
+              (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_16bit)>;
+
+    def : Pat<(i32 (trunc (srl (loadi64 addr:$src), (i8 immle64_32:$shamt)))),
+              (EXTRACT_SUBREG (RORX64mi addr:$src, imm:$shamt), sub_32bit)>;
+    def : Pat<(i32 (trunc (sra (loadi64 addr:$src), (i8 immle64_32:$shamt)))),
+              (EXTRACT_SUBREG (RORX64mi addr:$src, imm:$shamt), sub_32bit)>;
   }
 
   def : Pat<(rotr (loadi32 addr:$src), (i8 imm:$shamt)),

>From 6459c1dfc4f131b6a4cc1f559cf80667e6e4fede Mon Sep 17 00:00:00 2001
From: Bryce Wilson <bryce at brycemw.ca>
Date: Fri, 12 Jan 2024 15:57:03 -0600
Subject: [PATCH 2/5] [X86] Fix RORX patterns

---
 llvm/lib/Target/X86/X86InstrShiftRotate.td | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/llvm/lib/Target/X86/X86InstrShiftRotate.td b/llvm/lib/Target/X86/X86InstrShiftRotate.td
index c9e7e1a6eae68b..238e8e9b6e97f3 100644
--- a/llvm/lib/Target/X86/X86InstrShiftRotate.td
+++ b/llvm/lib/Target/X86/X86InstrShiftRotate.td
@@ -925,9 +925,9 @@ let Predicates = [HasBMI2] in {
     def : Pat<(i8 (trunc (sra GR32:$src, (i8 immle32_8:$shamt)))),
               (EXTRACT_SUBREG (RORX32ri GR32:$src, imm:$shamt), sub_8bit)>;
     def : Pat<(i8 (trunc (srl GR64:$src, (i8 immle64_8:$shamt)))),
-              (EXTRACT_SUBREG (RORX32ri (EXTRACT_SUBREG GR64:$src, sub_32bit), imm:$shamt), sub_8bit)>;
+              (EXTRACT_SUBREG (RORX64ri GR64:$src, imm:$shamt), sub_8bit)>;
     def : Pat<(i8 (trunc (sra GR64:$src, (i8 immle64_8:$shamt)))),
-              (EXTRACT_SUBREG (RORX32ri (EXTRACT_SUBREG GR64:$src, sub_32bit), imm:$shamt), sub_8bit)>;
+              (EXTRACT_SUBREG (RORX64ri GR64:$src, imm:$shamt), sub_8bit)>;
 
 
     def : Pat<(i16 (trunc (srl GR32:$src, (i8 immle32_16:$shamt)))),
@@ -935,9 +935,9 @@ let Predicates = [HasBMI2] in {
     def : Pat<(i16 (trunc (sra GR32:$src, (i8 immle32_16:$shamt)))),
               (EXTRACT_SUBREG (RORX32ri GR32:$src, imm:$shamt), sub_16bit)>;
     def : Pat<(i16 (trunc (srl GR64:$src, (i8 immle64_16:$shamt)))),
-              (EXTRACT_SUBREG (RORX32ri (EXTRACT_SUBREG GR64:$src, sub_32bit), imm:$shamt), sub_16bit)>;
+              (EXTRACT_SUBREG (RORX64ri GR64:$src, imm:$shamt), sub_16bit)>;
     def : Pat<(i16 (trunc (sra GR64:$src, (i8 immle64_16:$shamt)))),
-              (EXTRACT_SUBREG (RORX32ri (EXTRACT_SUBREG GR64:$src, sub_32bit), imm:$shamt), sub_16bit)>;
+              (EXTRACT_SUBREG (RORX64ri GR64:$src, imm:$shamt), sub_16bit)>;
 
     def : Pat<(i32 (trunc (srl GR64:$src, (i8 immle64_32:$shamt)))),
               (EXTRACT_SUBREG (RORX64ri GR64:$src, imm:$shamt), sub_32bit)>;
@@ -951,9 +951,9 @@ let Predicates = [HasBMI2] in {
     def : Pat<(i8 (trunc (sra (loadi32 addr:$src), (i8 immle32_8:$shamt)))),
               (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_8bit)>;
     def : Pat<(i8 (trunc (srl (loadi64 addr:$src), (i8 immle64_8:$shamt)))),
-              (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_8bit)>;
+              (EXTRACT_SUBREG (RORX64mi addr:$src, imm:$shamt), sub_8bit)>;
     def : Pat<(i8 (trunc (sra (loadi64 addr:$src), (i8 immle64_8:$shamt)))),
-              (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_8bit)>;
+              (EXTRACT_SUBREG (RORX64mi addr:$src, imm:$shamt), sub_8bit)>;
 
 
     def : Pat<(i16 (trunc (srl (loadi32 addr:$src), (i8 immle32_16:$shamt)))),
@@ -961,9 +961,9 @@ let Predicates = [HasBMI2] in {
     def : Pat<(i16 (trunc (sra (loadi32 addr:$src), (i8 immle32_16:$shamt)))),
               (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_16bit)>;
     def : Pat<(i16 (trunc (srl (loadi64 addr:$src), (i8 immle64_16:$shamt)))),
-              (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_16bit)>;
+              (EXTRACT_SUBREG (RORX64mi addr:$src, imm:$shamt), sub_16bit)>;
     def : Pat<(i16 (trunc (sra (loadi64 addr:$src), (i8 immle64_16:$shamt)))),
-              (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_16bit)>;
+              (EXTRACT_SUBREG (RORX64mi addr:$src, imm:$shamt), sub_16bit)>;
 
     def : Pat<(i32 (trunc (srl (loadi64 addr:$src), (i8 immle64_32:$shamt)))),
               (EXTRACT_SUBREG (RORX64mi addr:$src, imm:$shamt), sub_32bit)>;

>From 6202a22c0deee13fe40b1ff9730b1c5642289bd1 Mon Sep 17 00:00:00 2001
From: Bryce Wilson <bryce at brycemw.ca>
Date: Fri, 12 Jan 2024 15:58:30 -0600
Subject: [PATCH 3/5] Update atomic-unordered.ll

---
 llvm/test/CodeGen/X86/atomic-unordered.ll | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/llvm/test/CodeGen/X86/atomic-unordered.ll b/llvm/test/CodeGen/X86/atomic-unordered.ll
index df123be53474f0..c867817fd2dfff 100644
--- a/llvm/test/CodeGen/X86/atomic-unordered.ll
+++ b/llvm/test/CodeGen/X86/atomic-unordered.ll
@@ -2062,8 +2062,7 @@ define i32 @split_load(ptr %p) {
 ; CHECK-O3-LABEL: split_load:
 ; CHECK-O3:       # %bb.0:
 ; CHECK-O3-NEXT:    movq (%rdi), %rax
-; CHECK-O3-NEXT:    movq %rax, %rcx
-; CHECK-O3-NEXT:    shrq $32, %rcx
+; CHECK-O3-NEXT:    rorxq $32, %rax, %rcx
 ; CHECK-O3-NEXT:    orl %eax, %ecx
 ; CHECK-O3-NEXT:    movzbl %cl, %eax
 ; CHECK-O3-NEXT:    retq

>From dcd7e86bb27a7acbe5477e730dd63202527adc10 Mon Sep 17 00:00:00 2001
From: Bryce Wilson <bryce at brycemw.ca>
Date: Fri, 12 Jan 2024 15:59:25 -0600
Subject: [PATCH 4/5] Update bmi2.ll

---
 llvm/test/CodeGen/X86/bmi2.ll | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/llvm/test/CodeGen/X86/bmi2.ll b/llvm/test/CodeGen/X86/bmi2.ll
index 24e38cfeb704df..e81434b35096ac 100644
--- a/llvm/test/CodeGen/X86/bmi2.ll
+++ b/llvm/test/CodeGen/X86/bmi2.ll
@@ -310,8 +310,7 @@ define i32 @mulx32(i32 %x, i32 %y, ptr %p)   {
 ; X64-NEXT:    addl %edi, %edi
 ; X64-NEXT:    leal (%rsi,%rsi), %eax
 ; X64-NEXT:    imulq %rdi, %rax
-; X64-NEXT:    movq %rax, %rcx
-; X64-NEXT:    shrq $32, %rcx
+; X64-NEXT:    rorxq $32, %rax, %rcx
 ; X64-NEXT:    movl %ecx, (%rdx)
 ; X64-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-NEXT:    retq
@@ -344,8 +343,7 @@ define i32 @mulx32_load(i32 %x, ptr %y, ptr %p)   {
 ; X64-NEXT:    leal (%rdi,%rdi), %eax
 ; X64-NEXT:    movl (%rsi), %ecx
 ; X64-NEXT:    imulq %rcx, %rax
-; X64-NEXT:    movq %rax, %rcx
-; X64-NEXT:    shrq $32, %rcx
+; X64-NEXT:    rorxq $32, %rax, %rcx
 ; X64-NEXT:    movl %ecx, (%rdx)
 ; X64-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-NEXT:    retq

>From 56bc63ab44af67701042db5022bfc2727095410e Mon Sep 17 00:00:00 2001
From: Bryce Wilson <bryce at brycemw.ca>
Date: Fri, 12 Jan 2024 16:00:04 -0600
Subject: [PATCH 5/5] Update bmi2.ll

---
 llvm/test/CodeGen/X86/bmi2.ll | 1135 +++++++++++++++++++++++----------
 1 file changed, 793 insertions(+), 342 deletions(-)

diff --git a/llvm/test/CodeGen/X86/bmi2.ll b/llvm/test/CodeGen/X86/bmi2.ll
index e81434b35096ac..a935bca3161b09 100644
--- a/llvm/test/CodeGen/X86/bmi2.ll
+++ b/llvm/test/CodeGen/X86/bmi2.ll
@@ -1,360 +1,811 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+bmi,+bmi2,+cmov | FileCheck %s --check-prefix=X86
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+bmi,+bmi2 | FileCheck %s --check-prefix=X64
-
-define i32 @bzhi32(i32 %x, i32 %y)   {
-; X86-LABEL: bzhi32:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    addl %ecx, %ecx
-; X86-NEXT:    bzhil %eax, %ecx, %eax
-; X86-NEXT:    retl
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=CHECK,CHECK-NOBMI,CHECK-NOBMI-SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+bmi2 | FileCheck %s --check-prefixes=CHECK,CHECK-BMI2,CHECK-BMI2-SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+bmi2,+avx2 | FileCheck %s --check-prefixes=CHECK,CHECK-BMI2,CHECK-AVX,CHECK-AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+bmi2,+avx512f,+avx512vl | FileCheck %s --check-prefixes=CHECK,CHECK-BMI2,CHECK-AVX,CHECK-AVX512
+declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <16 x i16> @llvm.fshl.v16i16(<16 x i16>, <16 x i16>, <16 x i16>)
+declare <16 x i16> @llvm.fshr.v16i16(<16 x i16>, <16 x i16>, <16 x i16>)
+declare i64 @llvm.fshl.i64(i64, i64, i64)
+declare i64 @llvm.fshr.i64(i64, i64, i64)
+declare i32 @llvm.fshl.i32(i32, i32, i32)
+declare i32 @llvm.fshr.i32(i32, i32, i32)
+declare i16 @llvm.fshl.i16(i16, i16, i16)
+declare i16 @llvm.fshr.i16(i16, i16, i16)
+declare i8 @llvm.fshl.i8(i8, i8, i8)
+declare i8 @llvm.fshr.i8(i8, i8, i8)
+
+define i1 @shr_to_shl_eq_i8_s2(i8 %x) {
+; CHECK-LABEL: shr_to_shl_eq_i8_s2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    rolb $2, %al
+; CHECK-NEXT:    cmpb %al, %dil
+; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    retq
+  %and = and i8 %x, 63
+  %shr = lshr i8 %x, 2
+  %r = icmp eq i8 %and, %shr
+  ret i1 %r
+}
+
+define i1 @shl_to_shr_ne_i8_s7(i8 %x) {
+; CHECK-LABEL: shl_to_shr_ne_i8_s7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    shrb $7, %al
+; CHECK-NEXT:    andb $1, %dil
+; CHECK-NEXT:    cmpb %al, %dil
+; CHECK-NEXT:    setne %al
+; CHECK-NEXT:    retq
+  %shl = shl i8 %x, 7
+  %and = and i8 %x, 128
+  %r = icmp ne i8 %shl, %and
+  ret i1 %r
+}
+
+define i1 @rorl_to_srl_ne_i8_s5_fail(i8 %x) {
+; CHECK-LABEL: rorl_to_srl_ne_i8_s5_fail:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    rolb $5, %al
+; CHECK-NEXT:    cmpb %dil, %al
+; CHECK-NEXT:    setne %al
+; CHECK-NEXT:    retq
+  %ror = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 5)
+  %r = icmp ne i8 %ror, %x
+  ret i1 %r
+}
+
+define i1 @shr_to_shl_eq_i8_s1(i8 %x) {
+; CHECK-LABEL: shr_to_shl_eq_i8_s1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    rolb %al
+; CHECK-NEXT:    cmpb %al, %dil
+; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    retq
+  %and = and i8 %x, 127
+  %shr = lshr i8 %x, 1
+  %r = icmp eq i8 %and, %shr
+  ret i1 %r
+}
+
+define i1 @shr_to_shl_eq_i32_s3(i32 %x) {
+; CHECK-LABEL: shr_to_shl_eq_i32_s3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $edi killed $edi def $rdi
+; CHECK-NEXT:    leal (,%rdi,8), %eax
+; CHECK-NEXT:    andl $-8, %edi
+; CHECK-NEXT:    cmpl %eax, %edi
+; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    retq
+  %and = and i32 %x, 536870911
+  %shr = lshr i32 %x, 3
+  %r = icmp eq i32 %and, %shr
+  ret i1 %r
+}
+
+define i1 @shl_to_shr_eq_i32_s3_fail(i32 %x) {
+; CHECK-LABEL: shl_to_shr_eq_i32_s3_fail:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    andl $536870911, %eax # imm = 0x1FFFFFFF
+; CHECK-NEXT:    shll $3, %edi
+; CHECK-NEXT:    cmpl %edi, %eax
+; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    retq
+  %and = and i32 %x, 536870911
+  %shr = shl i32 %x, 3
+  %r = icmp eq i32 %and, %shr
+  ret i1 %r
+}
+
+define i1 @shl_to_shr_ne_i32_s16(i32 %x) {
+; CHECK-NOBMI-LABEL: shl_to_shr_ne_i32_s16:
+; CHECK-NOBMI:       # %bb.0:
+; CHECK-NOBMI-NEXT:    movzwl %di, %eax
+; CHECK-NOBMI-NEXT:    shrl $16, %edi
+; CHECK-NOBMI-NEXT:    cmpl %edi, %eax
+; CHECK-NOBMI-NEXT:    setne %al
+; CHECK-NOBMI-NEXT:    retq
 ;
-; X64-LABEL: bzhi32:
-; X64:       # %bb.0:
-; X64-NEXT:    addl %edi, %edi
-; X64-NEXT:    bzhil %esi, %edi, %eax
-; X64-NEXT:    retq
-  %x1 = add i32 %x, %x
-  %tmp = tail call i32 @llvm.x86.bmi.bzhi.32(i32 %x1, i32 %y)
-  ret i32 %tmp
-}
-
-define i32 @bzhi32_load(ptr %x, i32 %y)   {
-; X86-LABEL: bzhi32_load:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    bzhil %eax, (%ecx), %eax
-; X86-NEXT:    retl
+; CHECK-BMI2-LABEL: shl_to_shr_ne_i32_s16:
+; CHECK-BMI2:       # %bb.0:
+; CHECK-BMI2-NEXT:    rorxl $16, %edi, %eax
+; CHECK-BMI2-NEXT:    cmpl %eax, %edi
+; CHECK-BMI2-NEXT:    setne %al
+; CHECK-BMI2-NEXT:    retq
+  %shl = shl i32 %x, 16
+  %and = and i32 %x, 4294901760
+  %r = icmp ne i32 %shl, %and
+  ret i1 %r
+}
+
+define i1 @shl_to_shr_ne_i32_s16_fail(i32 %x) {
+; CHECK-LABEL: shl_to_shr_ne_i32_s16_fail:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    shll $16, %eax
+; CHECK-NEXT:    andl $2147450880, %edi # imm = 0x7FFF8000
+; CHECK-NEXT:    cmpl %edi, %eax
+; CHECK-NEXT:    setne %al
+; CHECK-NEXT:    retq
+  %shl = shl i32 %x, 16
+  %and = and i32 %x, 2147450880
+  %r = icmp ne i32 %shl, %and
+  ret i1 %r
+}
+
+define i1 @shr_to_shl_eq_i16_s1(i16 %x) {
+; CHECK-LABEL: shr_to_shl_eq_i16_s1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    rolw %ax
+; CHECK-NEXT:    cmpw %ax, %di
+; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    retq
+  %and = and i16 %x, 32767
+  %shr = lshr i16 %x, 1
+  %r = icmp eq i16 %and, %shr
+  ret i1 %r
+}
+
+define i1 @shr_to_shl_eq_i16_s1_fail(i16 %x) {
+; CHECK-LABEL: shr_to_shl_eq_i16_s1_fail:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movzwl %di, %eax
+; CHECK-NEXT:    andl $32766, %edi # imm = 0x7FFE
+; CHECK-NOBMI-NEXT:    shrl %eax
+; CHECK-BMI2-NEXT:    rorxl $1, %eax, %eax
+; CHECK-NEXT:    cmpw %ax, %di
+; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    retq
+  %and = and i16 %x, 32766
+  %shr = lshr i16 %x, 1
+  %r = icmp eq i16 %and, %shr
+  ret i1 %r
+}
+
+define i1 @shl_to_shr_eq_i64_s44(i64 %x) {
+; CHECK-LABEL: shl_to_shr_eq_i64_s44:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq %rdi, %rax
+; CHECK-NEXT:    shrq $44, %rax
+; CHECK-NEXT:    andl $1048575, %edi # imm = 0xFFFFF
+; CHECK-NEXT:    cmpq %rax, %rdi
+; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    retq
+  %shl = shl i64 %x, 44
+  %and = and i64 %x, 18446726481523507200
+  %r = icmp eq i64 %shl, %and
+  ret i1 %r
+}
+
+define i1 @shr_to_shl_ne_i64_s32(i64 %x) {
+; CHECK-NOBMI-LABEL: shr_to_shl_ne_i64_s32:
+; CHECK-NOBMI:       # %bb.0:
+; CHECK-NOBMI-NEXT:    movl %edi, %eax
+; CHECK-NOBMI-NEXT:    shrq $32, %rdi
+; CHECK-NOBMI-NEXT:    cmpq %rdi, %rax
+; CHECK-NOBMI-NEXT:    setne %al
+; CHECK-NOBMI-NEXT:    retq
 ;
-; X64-LABEL: bzhi32_load:
-; X64:       # %bb.0:
-; X64-NEXT:    bzhil %esi, (%rdi), %eax
-; X64-NEXT:    retq
-  %x1 = load i32, ptr %x
-  %tmp = tail call i32 @llvm.x86.bmi.bzhi.32(i32 %x1, i32 %y)
-  ret i32 %tmp
-}
-
-; PR48768 - 'bzhi' clears the overflow flag, so we don't need a separate 'test'.
-define i1 @bzhi32_overflow(i32 %x, i32 %y) {
-; X86-LABEL: bzhi32_overflow:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    bzhil %eax, {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    setle %al
-; X86-NEXT:    retl
+; CHECK-BMI2-LABEL: shr_to_shl_ne_i64_s32:
+; CHECK-BMI2:       # %bb.0:
+; CHECK-BMI2-NEXT:    rorxq $32, %rdi, %rax
+; CHECK-BMI2-NEXT:    cmpq %rax, %rdi
+; CHECK-BMI2-NEXT:    setne %al
+; CHECK-BMI2-NEXT:    retq
+  %and = and i64 %x, 4294967295
+  %shr = lshr i64 %x, 32
+  %r = icmp ne i64 %and, %shr
+  ret i1 %r
+}
+
+define i1 @rorl_to_shl_eq_i64_s16(i64 %x) {
+; CHECK-NOBMI-LABEL: rorl_to_shl_eq_i64_s16:
+; CHECK-NOBMI:       # %bb.0:
+; CHECK-NOBMI-NEXT:    movq %rdi, %rax
+; CHECK-NOBMI-NEXT:    rolq $16, %rax
+; CHECK-NOBMI-NEXT:    cmpq %rdi, %rax
+; CHECK-NOBMI-NEXT:    sete %al
+; CHECK-NOBMI-NEXT:    retq
 ;
-; X64-LABEL: bzhi32_overflow:
-; X64:       # %bb.0:
-; X64-NEXT:    bzhil %esi, %edi, %eax
-; X64-NEXT:    setle %al
-; X64-NEXT:    retq
-  %tmp = tail call i32 @llvm.x86.bmi.bzhi.32(i32 %x, i32 %y)
-  %cmp = icmp slt i32 %tmp, 1
-  ret i1 %cmp
-}
-
-declare i32 @llvm.x86.bmi.bzhi.32(i32, i32)
-
-define i32 @pdep32(i32 %x, i32 %y)   {
-; X86-LABEL: pdep32:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    addl %ecx, %ecx
-; X86-NEXT:    pdepl %ecx, %eax, %eax
-; X86-NEXT:    retl
+; CHECK-BMI2-LABEL: rorl_to_shl_eq_i64_s16:
+; CHECK-BMI2:       # %bb.0:
+; CHECK-BMI2-NEXT:    rorxq $48, %rdi, %rax
+; CHECK-BMI2-NEXT:    cmpq %rdi, %rax
+; CHECK-BMI2-NEXT:    sete %al
+; CHECK-BMI2-NEXT:    retq
+  %ror = call i64 @llvm.fshl.i64(i64 %x, i64 %x, i64 16)
+  %r = icmp eq i64 %ror, %x
+  ret i1 %r
+}
+
+define i1 @ashr_to_shl_ne_i64_s32_fail(i64 %x) {
+; CHECK-LABEL: ashr_to_shl_ne_i64_s32_fail:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    sarq $32, %rdi
+; CHECK-NEXT:    cmpq %rdi, %rax
+; CHECK-NEXT:    setne %al
+; CHECK-NEXT:    retq
+  %and = and i64 %x, 4294967295
+  %shr = ashr i64 %x, 32
+  %r = icmp ne i64 %and, %shr
+  ret i1 %r
+}
+
+define i1 @shl_to_shr_eq_i64_s63(i64 %x) {
+; CHECK-LABEL: shl_to_shr_eq_i64_s63:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq %rdi, %rax
+; CHECK-NEXT:    shrq $63, %rax
+; CHECK-NEXT:    andl $1, %edi
+; CHECK-NEXT:    cmpq %rax, %rdi
+; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    retq
+  %shl = shl i64 %x, 63
+  %and = and i64 %x, 9223372036854775808
+  %r = icmp eq i64 %shl, %and
+  ret i1 %r
+}
+
+define i1 @shl_to_shr_eq_i64_s63_fail(i64 %x) {
+; CHECK-LABEL: shl_to_shr_eq_i64_s63_fail:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
+; CHECK-NEXT:    andq %rdi, %rax
+; CHECK-NEXT:    shlq $63, %rdi
+; CHECK-NEXT:    cmpq %rax, %rdi
+; CHECK-NEXT:    seta %al
+; CHECK-NEXT:    retq
+  %shl = shl i64 %x, 63
+  %and = and i64 %x, 9223372036854775808
+  %r = icmp ugt i64 %shl, %and
+  ret i1 %r
+}
+
+define i1 @shr_to_shl_eq_i64_s7(i64 %x) {
+; CHECK-LABEL: shr_to_shl_eq_i64_s7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq %rdi, %rax
+; CHECK-NEXT:    shlq $7, %rax
+; CHECK-NEXT:    andq $-128, %rdi
+; CHECK-NEXT:    cmpq %rax, %rdi
+; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    retq
+  %and = and i64 %x, 144115188075855871
+  %shr = lshr i64 %x, 7
+  %r = icmp eq i64 %and, %shr
+  ret i1 %r
+}
+
+define i1 @shl_to_shr_ne_i32_s24(i32 %x) {
+; CHECK-LABEL: shl_to_shr_ne_i32_s24:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movzbl %dil, %eax
+; CHECK-NEXT:    shrl $24, %edi
+; CHECK-NEXT:    cmpl %edi, %eax
+; CHECK-NEXT:    setne %al
+; CHECK-NEXT:    retq
+  %shl = shl i32 %x, 24
+  %and = and i32 %x, 4278190080
+  %r = icmp ne i32 %shl, %and
+  ret i1 %r
+}
+
+define i1 @shr_to_shl_ne_i32_s24_fail(i32 %x) {
+; CHECK-LABEL: shr_to_shl_ne_i32_s24_fail:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    shrl $24, %eax
+; CHECK-NEXT:    andl $-16777216, %edi # imm = 0xFF000000
+; CHECK-NEXT:    cmpl %edi, %eax
+; CHECK-NEXT:    setne %al
+; CHECK-NEXT:    retq
+  %shl = lshr i32 %x, 24
+  %and = and i32 %x, 4278190080
+  %r = icmp ne i32 %shl, %and
+  ret i1 %r
+}
+
+define i1 @shr_to_shl_ne_i32_s8(i32 %x) {
+; CHECK-NOBMI-LABEL: shr_to_shl_ne_i32_s8:
+; CHECK-NOBMI:       # %bb.0:
+; CHECK-NOBMI-NEXT:    movl %edi, %eax
+; CHECK-NOBMI-NEXT:    roll $8, %eax
+; CHECK-NOBMI-NEXT:    cmpl %eax, %edi
+; CHECK-NOBMI-NEXT:    setne %al
+; CHECK-NOBMI-NEXT:    retq
 ;
-; X64-LABEL: pdep32:
-; X64:       # %bb.0:
-; X64-NEXT:    addl %esi, %esi
-; X64-NEXT:    pdepl %esi, %edi, %eax
-; X64-NEXT:    retq
-  %y1 = add i32 %y, %y
-  %tmp = tail call i32 @llvm.x86.bmi.pdep.32(i32 %x, i32 %y1)
-  ret i32 %tmp
-}
-
-define i32 @pdep32_load(i32 %x, ptr %y)   {
-; X86-LABEL: pdep32_load:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    pdepl (%eax), %ecx, %eax
-; X86-NEXT:    retl
+; CHECK-BMI2-LABEL: shr_to_shl_ne_i32_s8:
+; CHECK-BMI2:       # %bb.0:
+; CHECK-BMI2-NEXT:    rorxl $24, %edi, %eax
+; CHECK-BMI2-NEXT:    cmpl %eax, %edi
+; CHECK-BMI2-NEXT:    setne %al
+; CHECK-BMI2-NEXT:    retq
+  %and = and i32 %x, 16777215
+  %shr = lshr i32 %x, 8
+  %r = icmp ne i32 %and, %shr
+  ret i1 %r
+}
+
+define <4 x i1> @shr_to_ror_eq_4xi32_s4(<4 x i32> %x) {
+; CHECK-NOBMI-LABEL: shr_to_ror_eq_4xi32_s4:
+; CHECK-NOBMI:       # %bb.0:
+; CHECK-NOBMI-NEXT:    movdqa %xmm0, %xmm1
+; CHECK-NOBMI-NEXT:    psrld $4, %xmm1
+; CHECK-NOBMI-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NOBMI-NEXT:    pcmpeqd %xmm1, %xmm0
+; CHECK-NOBMI-NEXT:    pcmpeqd %xmm1, %xmm1
+; CHECK-NOBMI-NEXT:    pxor %xmm1, %xmm0
+; CHECK-NOBMI-NEXT:    retq
 ;
-; X64-LABEL: pdep32_load:
-; X64:       # %bb.0:
-; X64-NEXT:    pdepl (%rsi), %edi, %eax
-; X64-NEXT:    retq
-  %y1 = load i32, ptr %y
-  %tmp = tail call i32 @llvm.x86.bmi.pdep.32(i32 %x, i32 %y1)
-  ret i32 %tmp
-}
-
-define i32 @pdep32_anyext(i16 %x)   {
-; X86-LABEL: pdep32_anyext:
-; X86:       # %bb.0:
-; X86-NEXT:    movswl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl $-1431655766, %ecx # imm = 0xAAAAAAAA
-; X86-NEXT:    pdepl %ecx, %eax, %eax
-; X86-NEXT:    retl
+; CHECK-BMI2-SSE2-LABEL: shr_to_ror_eq_4xi32_s4:
+; CHECK-BMI2-SSE2:       # %bb.0:
+; CHECK-BMI2-SSE2-NEXT:    movdqa %xmm0, %xmm1
+; CHECK-BMI2-SSE2-NEXT:    psrld $4, %xmm1
+; CHECK-BMI2-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-BMI2-SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
+; CHECK-BMI2-SSE2-NEXT:    pcmpeqd %xmm1, %xmm1
+; CHECK-BMI2-SSE2-NEXT:    pxor %xmm1, %xmm0
+; CHECK-BMI2-SSE2-NEXT:    retq
 ;
-; X64-LABEL: pdep32_anyext:
-; X64:       # %bb.0:
-; X64-NEXT:    movl $-1431655766, %eax # imm = 0xAAAAAAAA
-; X64-NEXT:    pdepl %eax, %edi, %eax
-; X64-NEXT:    retq
-  %x1 = sext i16 %x to i32
-  %tmp = tail call i32 @llvm.x86.bmi.pdep.32(i32 %x1, i32 -1431655766)
-  ret i32 %tmp
-}
-
-define i32 @pdep32_demandedbits(i32 %x) {
-; X86-LABEL: pdep32_demandedbits:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl $1431655765, %ecx # imm = 0x55555555
-; X86-NEXT:    pdepl %ecx, %eax, %eax
-; X86-NEXT:    retl
+; CHECK-AVX2-LABEL: shr_to_ror_eq_4xi32_s4:
+; CHECK-AVX2:       # %bb.0:
+; CHECK-AVX2-NEXT:    vpsrld $4, %xmm0, %xmm1
+; CHECK-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [268435455,268435455,268435455,268435455]
+; CHECK-AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    vpcmpeqd %xmm0, %xmm1, %xmm0
+; CHECK-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; CHECK-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    retq
 ;
-; X64-LABEL: pdep32_demandedbits:
-; X64:       # %bb.0:
-; X64-NEXT:    movl $1431655765, %eax # imm = 0x55555555
-; X64-NEXT:    pdepl %eax, %edi, %eax
-; X64-NEXT:    retq
-  %tmp = tail call i32 @llvm.x86.bmi.pdep.32(i32 %x, i32 1431655765)
-  %tmp2 = and i32 %tmp, 1431655765
-  ret i32 %tmp2
-}
-
-define i32 @pdep32_demandedbits2(i32 %x, i32 %y) {
-; X86-LABEL: pdep32_demandedbits2:
-; X86:       # %bb.0:
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    pdepl {{[0-9]+}}(%esp), %eax, %eax
-; X86-NEXT:    andl $128, %eax
-; X86-NEXT:    retl
+; CHECK-AVX512-LABEL: shr_to_ror_eq_4xi32_s4:
+; CHECK-AVX512:       # %bb.0:
+; CHECK-AVX512-NEXT:    vprold $4, %xmm0, %xmm1
+; CHECK-AVX512-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; CHECK-AVX512-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
+; CHECK-AVX512-NEXT:    retq
+  %shr = lshr <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
+  %and = and <4 x i32> %x, <i32 268435455, i32 268435455, i32 268435455, i32 268435455>
+  %r = icmp ne <4 x i32> %shr, %and
+  ret <4 x i1> %r
+}
+
+define <4 x i1> @shl_to_ror_eq_4xi32_s8(<4 x i32> %x) {
+; CHECK-NOBMI-LABEL: shl_to_ror_eq_4xi32_s8:
+; CHECK-NOBMI:       # %bb.0:
+; CHECK-NOBMI-NEXT:    movdqa %xmm0, %xmm1
+; CHECK-NOBMI-NEXT:    pslld $8, %xmm1
+; CHECK-NOBMI-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NOBMI-NEXT:    pcmpeqd %xmm1, %xmm0
+; CHECK-NOBMI-NEXT:    pcmpeqd %xmm1, %xmm1
+; CHECK-NOBMI-NEXT:    pxor %xmm1, %xmm0
+; CHECK-NOBMI-NEXT:    retq
 ;
-; X64-LABEL: pdep32_demandedbits2:
-; X64:       # %bb.0:
-; X64-NEXT:    pdepl %esi, %edi, %eax
-; X64-NEXT:    andl $128, %eax
-; X64-NEXT:    retq
-  %tmp = and i32 %x, 255
-  %tmp2 = tail call i32 @llvm.x86.bmi.pdep.32(i32 %tmp, i32 %y)
-  %tmp3 = and i32 %tmp2, 128
-  ret i32 %tmp3
-}
-
-define i32 @pdep32_demandedbits_mask(i32 %x, i16 %y) {
-; X86-LABEL: pdep32_demandedbits_mask:
-; X86:       # %bb.0:
-; X86-NEXT:    movswl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    pdepl %eax, %ecx, %eax
-; X86-NEXT:    andl $32768, %eax # imm = 0x8000
-; X86-NEXT:    retl
+; CHECK-BMI2-SSE2-LABEL: shl_to_ror_eq_4xi32_s8:
+; CHECK-BMI2-SSE2:       # %bb.0:
+; CHECK-BMI2-SSE2-NEXT:    movdqa %xmm0, %xmm1
+; CHECK-BMI2-SSE2-NEXT:    pslld $8, %xmm1
+; CHECK-BMI2-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-BMI2-SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
+; CHECK-BMI2-SSE2-NEXT:    pcmpeqd %xmm1, %xmm1
+; CHECK-BMI2-SSE2-NEXT:    pxor %xmm1, %xmm0
+; CHECK-BMI2-SSE2-NEXT:    retq
 ;
-; X64-LABEL: pdep32_demandedbits_mask:
-; X64:       # %bb.0:
-; X64-NEXT:    pdepl %esi, %edi, %eax
-; X64-NEXT:    andl $32768, %eax # imm = 0x8000
-; X64-NEXT:    retq
-  %tmp = sext i16 %y to i32
-  %tmp2 = tail call i32 @llvm.x86.bmi.pdep.32(i32 %x, i32 %tmp)
-  %tmp3 = and i32 %tmp2, 32768
-  ret i32 %tmp3
-}
-
-define i32 @pdep32_demandedbits_mask2(i32 %x, i16 %y) {
-; X86-LABEL: pdep32_demandedbits_mask2:
-; X86:       # %bb.0:
-; X86-NEXT:    movswl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    pdepl %eax, %ecx, %eax
-; X86-NEXT:    movzwl %ax, %eax
-; X86-NEXT:    retl
+; CHECK-AVX2-LABEL: shl_to_ror_eq_4xi32_s8:
+; CHECK-AVX2:       # %bb.0:
+; CHECK-AVX2-NEXT:    vpslld $8, %xmm0, %xmm1
+; CHECK-AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    vpcmpeqd %xmm0, %xmm1, %xmm0
+; CHECK-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; CHECK-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    retq
 ;
-; X64-LABEL: pdep32_demandedbits_mask2:
-; X64:       # %bb.0:
-; X64-NEXT:    pdepl %esi, %edi, %eax
-; X64-NEXT:    movzwl %ax, %eax
-; X64-NEXT:    retq
-  %tmp = sext i16 %y to i32
-  %tmp2 = tail call i32 @llvm.x86.bmi.pdep.32(i32 %x, i32 %tmp)
-  %tmp3 = and i32 %tmp2, 65535
-  ret i32 %tmp3
-}
-
-define i32 @pdep32_knownbits(i32 %x) {
-; X86-LABEL: pdep32_knownbits:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl $1431655765, %ecx # imm = 0x55555555
-; X86-NEXT:    pdepl %ecx, %eax, %eax
-; X86-NEXT:    imull %eax, %eax
-; X86-NEXT:    retl
+; CHECK-AVX512-LABEL: shl_to_ror_eq_4xi32_s8:
+; CHECK-AVX512:       # %bb.0:
+; CHECK-AVX512-NEXT:    vprold $8, %xmm0, %xmm1
+; CHECK-AVX512-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; CHECK-AVX512-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
+; CHECK-AVX512-NEXT:    retq
+  %shr = shl <4 x i32> %x, <i32 8, i32 8, i32 8, i32 8>
+  %and = and <4 x i32> %x, <i32 4294967040, i32 4294967040, i32 4294967040, i32 4294967040>
+  %r = icmp ne <4 x i32> %shr, %and
+  ret <4 x i1> %r
+}
+
+define <4 x i1> @shl_to_ror_eq_4xi32_s7_fail_no_p2(<4 x i32> %x) {
+; CHECK-NOBMI-LABEL: shl_to_ror_eq_4xi32_s7_fail_no_p2:
+; CHECK-NOBMI:       # %bb.0:
+; CHECK-NOBMI-NEXT:    movdqa %xmm0, %xmm1
+; CHECK-NOBMI-NEXT:    pslld $7, %xmm1
+; CHECK-NOBMI-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NOBMI-NEXT:    pcmpeqd %xmm1, %xmm0
+; CHECK-NOBMI-NEXT:    pcmpeqd %xmm1, %xmm1
+; CHECK-NOBMI-NEXT:    pxor %xmm1, %xmm0
+; CHECK-NOBMI-NEXT:    retq
 ;
-; X64-LABEL: pdep32_knownbits:
-; X64:       # %bb.0:
-; X64-NEXT:    movl $1431655765, %eax # imm = 0x55555555
-; X64-NEXT:    pdepl %eax, %edi, %eax
-; X64-NEXT:    imull %eax, %eax
-; X64-NEXT:    retq
-  %tmp = tail call i32 @llvm.x86.bmi.pdep.32(i32 %x, i32 1431655765)
-  %tmp2 = and i32 %tmp, 1431655765
-  %tmp3 = mul i32 %tmp, %tmp2
-  ret i32 %tmp3
-}
-
-define i32 @pdep32_knownbits2(i32 %x, i32 %y) {
-; X86-LABEL: pdep32_knownbits2:
-; X86:       # %bb.0:
-; X86-NEXT:    movl $-256, %eax
-; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    pdepl {{[0-9]+}}(%esp), %eax, %eax
-; X86-NEXT:    imull %eax, %eax
-; X86-NEXT:    retl
+; CHECK-BMI2-SSE2-LABEL: shl_to_ror_eq_4xi32_s7_fail_no_p2:
+; CHECK-BMI2-SSE2:       # %bb.0:
+; CHECK-BMI2-SSE2-NEXT:    movdqa %xmm0, %xmm1
+; CHECK-BMI2-SSE2-NEXT:    pslld $7, %xmm1
+; CHECK-BMI2-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-BMI2-SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
+; CHECK-BMI2-SSE2-NEXT:    pcmpeqd %xmm1, %xmm1
+; CHECK-BMI2-SSE2-NEXT:    pxor %xmm1, %xmm0
+; CHECK-BMI2-SSE2-NEXT:    retq
 ;
-; X64-LABEL: pdep32_knownbits2:
-; X64:       # %bb.0:
-; X64-NEXT:    andl $-256, %edi
-; X64-NEXT:    pdepl %esi, %edi, %eax
-; X64-NEXT:    imull %eax, %eax
-; X64-NEXT:    retq
-  %tmp = and i32 %x, -256
-  %tmp2 = tail call i32 @llvm.x86.bmi.pdep.32(i32 %tmp, i32 %y)
-  %tmp3 = and i32 %tmp2, -256
-  %tmp4 = mul i32 %tmp2, %tmp3
-  ret i32 %tmp4
-}
-
-declare i32 @llvm.x86.bmi.pdep.32(i32, i32)
-
-define i32 @pext32(i32 %x, i32 %y)   {
-; X86-LABEL: pext32:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    addl %ecx, %ecx
-; X86-NEXT:    pextl %ecx, %eax, %eax
-; X86-NEXT:    retl
+; CHECK-AVX2-LABEL: shl_to_ror_eq_4xi32_s7_fail_no_p2:
+; CHECK-AVX2:       # %bb.0:
+; CHECK-AVX2-NEXT:    vpslld $7, %xmm0, %xmm1
+; CHECK-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [4294967168,4294967168,4294967168,4294967168]
+; CHECK-AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    vpcmpeqd %xmm0, %xmm1, %xmm0
+; CHECK-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; CHECK-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    retq
 ;
-; X64-LABEL: pext32:
-; X64:       # %bb.0:
-; X64-NEXT:    addl %esi, %esi
-; X64-NEXT:    pextl %esi, %edi, %eax
-; X64-NEXT:    retq
-  %y1 = add i32 %y, %y
-  %tmp = tail call i32 @llvm.x86.bmi.pext.32(i32 %x, i32 %y1)
-  ret i32 %tmp
-}
-
-define i32 @pext32_load(i32 %x, ptr %y)   {
-; X86-LABEL: pext32_load:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    pextl (%eax), %ecx, %eax
-; X86-NEXT:    retl
+; CHECK-AVX512-LABEL: shl_to_ror_eq_4xi32_s7_fail_no_p2:
+; CHECK-AVX512:       # %bb.0:
+; CHECK-AVX512-NEXT:    vpslld $7, %xmm0, %xmm1
+; CHECK-AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512-NEXT:    vpcmpeqd %xmm0, %xmm1, %xmm0
+; CHECK-AVX512-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
+; CHECK-AVX512-NEXT:    retq
+  %shr = shl <4 x i32> %x, <i32 7, i32 7, i32 7, i32 7>
+  %and = and <4 x i32> %x, <i32 4294967168, i32 4294967168, i32 4294967168, i32 4294967168>
+  %r = icmp ne <4 x i32> %shr, %and
+  ret <4 x i1> %r
+}
+
+define <4 x i1> @shr_to_ror_eq_4xi32_s4_fail_no_splat(<4 x i32> %x) {
+; CHECK-NOBMI-LABEL: shr_to_ror_eq_4xi32_s4_fail_no_splat:
+; CHECK-NOBMI:       # %bb.0:
+; CHECK-NOBMI-NEXT:    movdqa %xmm0, %xmm1
+; CHECK-NOBMI-NEXT:    psrld $4, %xmm1
+; CHECK-NOBMI-NEXT:    movdqa %xmm0, %xmm2
+; CHECK-NOBMI-NEXT:    psrld $8, %xmm2
+; CHECK-NOBMI-NEXT:    shufps {{.*#+}} xmm2 = xmm2[3,0],xmm1[2,0]
+; CHECK-NOBMI-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,0]
+; CHECK-NOBMI-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NOBMI-NEXT:    pcmpeqd %xmm1, %xmm0
+; CHECK-NOBMI-NEXT:    pcmpeqd %xmm1, %xmm1
+; CHECK-NOBMI-NEXT:    pxor %xmm1, %xmm0
+; CHECK-NOBMI-NEXT:    retq
 ;
-; X64-LABEL: pext32_load:
-; X64:       # %bb.0:
-; X64-NEXT:    pextl (%rsi), %edi, %eax
-; X64-NEXT:    retq
-  %y1 = load i32, ptr %y
-  %tmp = tail call i32 @llvm.x86.bmi.pext.32(i32 %x, i32 %y1)
-  ret i32 %tmp
-}
-
-define i32 @pext32_knownbits(i32 %x)   {
-; X86-LABEL: pext32_knownbits:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl $1431655765, %ecx # imm = 0x55555555
-; X86-NEXT:    pextl %ecx, %eax, %eax
-; X86-NEXT:    retl
+; CHECK-BMI2-SSE2-LABEL: shr_to_ror_eq_4xi32_s4_fail_no_splat:
+; CHECK-BMI2-SSE2:       # %bb.0:
+; CHECK-BMI2-SSE2-NEXT:    movdqa %xmm0, %xmm1
+; CHECK-BMI2-SSE2-NEXT:    psrld $4, %xmm1
+; CHECK-BMI2-SSE2-NEXT:    movdqa %xmm0, %xmm2
+; CHECK-BMI2-SSE2-NEXT:    psrld $8, %xmm2
+; CHECK-BMI2-SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[3,0],xmm1[2,0]
+; CHECK-BMI2-SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,0]
+; CHECK-BMI2-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-BMI2-SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
+; CHECK-BMI2-SSE2-NEXT:    pcmpeqd %xmm1, %xmm1
+; CHECK-BMI2-SSE2-NEXT:    pxor %xmm1, %xmm0
+; CHECK-BMI2-SSE2-NEXT:    retq
 ;
-; X64-LABEL: pext32_knownbits:
-; X64:       # %bb.0:
-; X64-NEXT:    movl $1431655765, %eax # imm = 0x55555555
-; X64-NEXT:    pextl %eax, %edi, %eax
-; X64-NEXT:    retq
-  %tmp = tail call i32 @llvm.x86.bmi.pext.32(i32 %x, i32 1431655765)
-  %tmp2 = and i32 %tmp, 65535
-  ret i32 %tmp2
-}
-
-declare i32 @llvm.x86.bmi.pext.32(i32, i32)
-
-define i32 @mulx32(i32 %x, i32 %y, ptr %p)   {
-; X86-LABEL: mulx32:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    addl %edx, %edx
-; X86-NEXT:    addl %eax, %eax
-; X86-NEXT:    mulxl %eax, %eax, %edx
-; X86-NEXT:    movl %edx, (%ecx)
-; X86-NEXT:    retl
+; CHECK-AVX2-LABEL: shr_to_ror_eq_4xi32_s4_fail_no_splat:
+; CHECK-AVX2:       # %bb.0:
+; CHECK-AVX2-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; CHECK-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [268435455,268435455,268435455,268435455]
+; CHECK-AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    vpcmpeqd %xmm0, %xmm1, %xmm0
+; CHECK-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; CHECK-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    retq
 ;
-; X64-LABEL: mulx32:
-; X64:       # %bb.0:
-; X64-NEXT:    # kill: def $esi killed $esi def $rsi
-; X64-NEXT:    # kill: def $edi killed $edi def $rdi
-; X64-NEXT:    addl %edi, %edi
-; X64-NEXT:    leal (%rsi,%rsi), %eax
-; X64-NEXT:    imulq %rdi, %rax
-; X64-NEXT:    rorxq $32, %rax, %rcx
-; X64-NEXT:    movl %ecx, (%rdx)
-; X64-NEXT:    # kill: def $eax killed $eax killed $rax
-; X64-NEXT:    retq
-  %x1 = add i32 %x, %x
-  %y1 = add i32 %y, %y
-  %x2 = zext i32 %x1 to i64
-  %y2 = zext i32 %y1 to i64
-  %r1 = mul i64 %x2, %y2
-  %h1 = lshr i64 %r1, 32
-  %h  = trunc i64 %h1 to i32
-  %l  = trunc i64 %r1 to i32
-  store i32 %h, ptr %p
-  ret i32 %l
-}
-
-define i32 @mulx32_load(i32 %x, ptr %y, ptr %p)   {
-; X86-LABEL: mulx32_load:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    addl %edx, %edx
-; X86-NEXT:    mulxl (%eax), %eax, %edx
-; X86-NEXT:    movl %edx, (%ecx)
-; X86-NEXT:    retl
+; CHECK-AVX512-LABEL: shr_to_ror_eq_4xi32_s4_fail_no_splat:
+; CHECK-AVX512:       # %bb.0:
+; CHECK-AVX512-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512-NEXT:    vpcmpeqd %xmm0, %xmm1, %xmm0
+; CHECK-AVX512-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
+; CHECK-AVX512-NEXT:    retq
+  %shr = lshr <4 x i32> %x, <i32 4, i32 4, i32 4, i32 8>
+  %and = and <4 x i32> %x, <i32 268435455, i32 268435455, i32 268435455, i32 268435455>
+  %r = icmp ne <4 x i32> %shr, %and
+  ret <4 x i1> %r
+}
+
+define <16 x i1> @shl_to_ror_eq_16xi16_s8_fail_preserve_i16(<16 x i16> %x) {
+; CHECK-NOBMI-LABEL: shl_to_ror_eq_16xi16_s8_fail_preserve_i16:
+; CHECK-NOBMI:       # %bb.0:
+; CHECK-NOBMI-NEXT:    movdqa %xmm0, %xmm2
+; CHECK-NOBMI-NEXT:    psllw $8, %xmm2
+; CHECK-NOBMI-NEXT:    movdqa %xmm1, %xmm3
+; CHECK-NOBMI-NEXT:    psllw $8, %xmm3
+; CHECK-NOBMI-NEXT:    movdqa {{.*#+}} xmm4 = [0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255]
+; CHECK-NOBMI-NEXT:    pand %xmm4, %xmm0
+; CHECK-NOBMI-NEXT:    pcmpeqw %xmm2, %xmm0
+; CHECK-NOBMI-NEXT:    pand %xmm4, %xmm1
+; CHECK-NOBMI-NEXT:    pcmpeqw %xmm3, %xmm1
+; CHECK-NOBMI-NEXT:    packsswb %xmm1, %xmm0
+; CHECK-NOBMI-NEXT:    pcmpeqd %xmm1, %xmm1
+; CHECK-NOBMI-NEXT:    pxor %xmm1, %xmm0
+; CHECK-NOBMI-NEXT:    retq
+;
+; CHECK-BMI2-SSE2-LABEL: shl_to_ror_eq_16xi16_s8_fail_preserve_i16:
+; CHECK-BMI2-SSE2:       # %bb.0:
+; CHECK-BMI2-SSE2-NEXT:    movdqa %xmm0, %xmm2
+; CHECK-BMI2-SSE2-NEXT:    psllw $8, %xmm2
+; CHECK-BMI2-SSE2-NEXT:    movdqa %xmm1, %xmm3
+; CHECK-BMI2-SSE2-NEXT:    psllw $8, %xmm3
+; CHECK-BMI2-SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255]
+; CHECK-BMI2-SSE2-NEXT:    pand %xmm4, %xmm0
+; CHECK-BMI2-SSE2-NEXT:    pcmpeqw %xmm2, %xmm0
+; CHECK-BMI2-SSE2-NEXT:    pand %xmm4, %xmm1
+; CHECK-BMI2-SSE2-NEXT:    pcmpeqw %xmm3, %xmm1
+; CHECK-BMI2-SSE2-NEXT:    packsswb %xmm1, %xmm0
+; CHECK-BMI2-SSE2-NEXT:    pcmpeqd %xmm1, %xmm1
+; CHECK-BMI2-SSE2-NEXT:    pxor %xmm1, %xmm0
+; CHECK-BMI2-SSE2-NEXT:    retq
+;
+; CHECK-AVX2-LABEL: shl_to_ror_eq_16xi16_s8_fail_preserve_i16:
+; CHECK-AVX2:       # %bb.0:
+; CHECK-AVX2-NEXT:    vpsllw $8, %ymm0, %ymm1
+; CHECK-AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; CHECK-AVX2-NEXT:    vpcmpeqw %ymm0, %ymm1, %ymm0
+; CHECK-AVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
+; CHECK-AVX2-NEXT:    vpxor %ymm1, %ymm0, %ymm0
+; CHECK-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; CHECK-AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    vzeroupper
+; CHECK-AVX2-NEXT:    retq
 ;
-; X64-LABEL: mulx32_load:
-; X64:       # %bb.0:
-; X64-NEXT:    # kill: def $edi killed $edi def $rdi
-; X64-NEXT:    leal (%rdi,%rdi), %eax
-; X64-NEXT:    movl (%rsi), %ecx
-; X64-NEXT:    imulq %rcx, %rax
-; X64-NEXT:    rorxq $32, %rax, %rcx
-; X64-NEXT:    movl %ecx, (%rdx)
-; X64-NEXT:    # kill: def $eax killed $eax killed $rax
-; X64-NEXT:    retq
-  %x1 = add i32 %x, %x
-  %y1 = load i32, ptr %y
-  %x2 = zext i32 %x1 to i64
-  %y2 = zext i32 %y1 to i64
-  %r1 = mul i64 %x2, %y2
-  %h1 = lshr i64 %r1, 32
-  %h  = trunc i64 %h1 to i32
-  %l  = trunc i64 %r1 to i32
-  store i32 %h, ptr %p
-  ret i32 %l
+; CHECK-AVX512-LABEL: shl_to_ror_eq_16xi16_s8_fail_preserve_i16:
+; CHECK-AVX512:       # %bb.0:
+; CHECK-AVX512-NEXT:    vpsllw $8, %ymm0, %ymm1
+; CHECK-AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; CHECK-AVX512-NEXT:    vpcmpeqw %ymm0, %ymm1, %ymm0
+; CHECK-AVX512-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; CHECK-AVX512-NEXT:    vpmovdb %zmm0, %xmm0
+; CHECK-AVX512-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
+; CHECK-AVX512-NEXT:    vzeroupper
+; CHECK-AVX512-NEXT:    retq
+  %shr = shl <16 x i16> %x, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  %and = and <16 x i16> %x, <i16 4294967040, i16 4294967040, i16 4294967040, i16 4294967040, i16 4294967040, i16 4294967040, i16 4294967040, i16 4294967040, i16 4294967040, i16 4294967040, i16 4294967040, i16 4294967040, i16 4294967040, i16 4294967040, i16 4294967040, i16 4294967040>
+  %r = icmp ne <16 x i16> %shr, %and
+  ret <16 x i1> %r
+}
+
+define i1 @shr_to_shl_eq_i32_s5_fail_doesnt_add_up(i32 %x) {
+; CHECK-LABEL: shr_to_shl_eq_i32_s5_fail_doesnt_add_up:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    andl $7, %eax
+; CHECK-NEXT:    shrl $5, %edi
+; CHECK-NEXT:    cmpl %edi, %eax
+; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    retq
+  %and = and i32 %x, 7
+  %sh = lshr i32 %x, 5
+  %r = icmp eq i32 %and, %sh
+  ret i1 %r
+}
+
+define i1 @shr_to_shl_eq_i8_s5_fail_doesnt_add_up2(i32 %x) {
+; CHECK-LABEL: shr_to_shl_eq_i8_s5_fail_doesnt_add_up2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    andl $268435455, %eax # imm = 0xFFFFFFF
+; CHECK-NEXT:    shrl $5, %edi
+; CHECK-NEXT:    cmpl %edi, %eax
+; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    retq
+  %and = and i32 %x, 268435455
+  %sh = lshr i32 %x, 5
+  %r = icmp eq i32 %and, %sh
+  ret i1 %r
+}
+
+define i1 @shr_to_shl_eq_i8_s5_fail_doesnt_add_up3(i32 %x) {
+; CHECK-LABEL: shr_to_shl_eq_i8_s5_fail_doesnt_add_up3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    andl $67108863, %eax # imm = 0x3FFFFFF
+; CHECK-NEXT:    shrl $5, %edi
+; CHECK-NEXT:    cmpl %edi, %eax
+; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    retq
+  %and = and i32 %x, 67108863
+  %sh = lshr i32 %x, 5
+  %r = icmp eq i32 %and, %sh
+  ret i1 %r
+}
+
+define i1 @shr_to_shl_eq_i8_s5_fail_doesnt_not_mask(i32 %x) {
+; CHECK-LABEL: shr_to_shl_eq_i8_s5_fail_doesnt_not_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    andl $11, %eax
+; CHECK-NEXT:    shrl $5, %edi
+; CHECK-NEXT:    cmpl %edi, %eax
+; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    retq
+  %and = and i32 %x, 11
+  %sh = lshr i32 %x, 5
+  %r = icmp eq i32 %and, %sh
+  ret i1 %r
+}
+
+define i1 @shl_to_shr_eq_i32_s9_fail_wrong_mask(i32 %x) {
+; CHECK-LABEL: shl_to_shr_eq_i32_s9_fail_wrong_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    andl $511, %eax # imm = 0x1FF
+; CHECK-NEXT:    shll $9, %edi
+; CHECK-NEXT:    cmpl %edi, %eax
+; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    retq
+  %and = and i32 %x, 511
+  %sh = shl i32 %x, 9
+  %r = icmp eq i32 %and, %sh
+  ret i1 %r
+}
+
+define i1 @shr_to_shl_eq_i32_s5_fail_wrong_mask(i32 %x) {
+; CHECK-LABEL: shr_to_shl_eq_i32_s5_fail_wrong_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    andl $-32, %eax
+; CHECK-NEXT:    shrl $5, %edi
+; CHECK-NEXT:    cmpl %edi, %eax
+; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    retq
+  %and = and i32 %x, -32
+  %sh = lshr i32 %x, 5
+  %r = icmp eq i32 %and, %sh
+  ret i1 %r
 }
+
+define i1 @shl_to_shr_eq_i32_s9_fail_doesnt_add_up(i32 %x) {
+; CHECK-LABEL: shl_to_shr_eq_i32_s9_fail_doesnt_add_up:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    andl $65024, %eax # imm = 0xFE00
+; CHECK-NEXT:    shll $9, %edi
+; CHECK-NEXT:    cmpl %edi, %eax
+; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    retq
+  %and = and i32 %x, 65024
+  %sh = shl i32 %x, 9
+  %r = icmp eq i32 %and, %sh
+  ret i1 %r
+}
+
+define i1 @shl_to_shr_eq_i32_s9_fail_doesnt_add_up2(i32 %x) {
+; CHECK-LABEL: shl_to_shr_eq_i32_s9_fail_doesnt_add_up2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    andl $-1024, %eax # imm = 0xFC00
+; CHECK-NEXT:    shll $9, %edi
+; CHECK-NEXT:    cmpl %edi, %eax
+; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    retq
+  %and = and i32 %x, -1024
+  %sh = shl i32 %x, 9
+  %r = icmp eq i32 %and, %sh
+  ret i1 %r
+}
+
+define i1 @shl_to_shr_eq_i32_s9_fail_doesnt_add_up3(i32 %x) {
+; CHECK-LABEL: shl_to_shr_eq_i32_s9_fail_doesnt_add_up3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    andl $-256, %eax
+; CHECK-NEXT:    shll $9, %edi
+; CHECK-NEXT:    cmpl %edi, %eax
+; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    retq
+  %and = and i32 %x, -256
+  %sh = shl i32 %x, 9
+  %r = icmp eq i32 %and, %sh
+  ret i1 %r
+}
+
+define i1 @shl_to_shr_eq_i32_s9_fail_not_mask(i32 %x) {
+; CHECK-LABEL: shl_to_shr_eq_i32_s9_fail_not_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    andl $-511, %eax # imm = 0xFE01
+; CHECK-NEXT:    shll $9, %edi
+; CHECK-NEXT:    cmpl %edi, %eax
+; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    retq
+  %and = and i32 %x, -511
+  %sh = shl i32 %x, 9
+  %r = icmp eq i32 %and, %sh
+  ret i1 %r
+}
+
+define i1 @shl_to_shr_eq_i32_s9_fail_not_mask2(i32 %x) {
+; CHECK-LABEL: shl_to_shr_eq_i32_s9_fail_not_mask2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    andl $-255, %eax
+; CHECK-NEXT:    shll $9, %edi
+; CHECK-NEXT:    cmpl %edi, %eax
+; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    retq
+  %and = and i32 %x, -255
+  %sh = shl i32 %x, 9
+  %r = icmp eq i32 %and, %sh
+  ret i1 %r
+}
+
+define i1 @shl_to_shr_eq_i32_s9_fail_wrong_mask2(i32 %x) {
+; CHECK-LABEL: shl_to_shr_eq_i32_s9_fail_wrong_mask2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    andl $8388607, %eax # imm = 0x7FFFFF
+; CHECK-NEXT:    shll $9, %edi
+; CHECK-NEXT:    cmpl %edi, %eax
+; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    retq
+  %and = and i32 %x, 8388607
+  %sh = shl i32 %x, 9
+  %r = icmp eq i32 %and, %sh
+  ret i1 %r
+}
+
+define i1 @shl_to_shr_eq_i32_s9(i32 %x) {
+; CHECK-LABEL: shl_to_shr_eq_i32_s9:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    shrl $9, %eax
+; CHECK-NEXT:    andl $8388607, %edi # imm = 0x7FFFFF
+; CHECK-NEXT:    cmpl %eax, %edi
+; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    retq
+  %and = and i32 %x, -512
+  %sh = shl i32 %x, 9
+  %r = icmp eq i32 %and, %sh
+  ret i1 %r
+}
+
+define i1 @shr_to_shl_eq_i32_s5(i32 %x) {
+; CHECK-LABEL: shr_to_shl_eq_i32_s5:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    shll $5, %eax
+; CHECK-NEXT:    andl $-32, %edi
+; CHECK-NEXT:    cmpl %eax, %edi
+; CHECK-NEXT:    sete %al
+; CHECK-NEXT:    retq
+  %and = and i32 %x, 134217727
+  %sh = lshr i32 %x, 5
+  %r = icmp eq i32 %and, %sh
+  ret i1 %r
+}
+
+define i1 @shr_to_rotate_eq_i32_s5(i32 %x) {
+; CHECK-NOBMI-LABEL: shr_to_rotate_eq_i32_s5:
+; CHECK-NOBMI:       # %bb.0:
+; CHECK-NOBMI-NEXT:    movl %edi, %eax
+; CHECK-NOBMI-NEXT:    roll $4, %eax
+; CHECK-NOBMI-NEXT:    cmpl %eax, %edi
+; CHECK-NOBMI-NEXT:    sete %al
+; CHECK-NOBMI-NEXT:    retq
+;
+; CHECK-BMI2-LABEL: shr_to_rotate_eq_i32_s5:
+; CHECK-BMI2:       # %bb.0:
+; CHECK-BMI2-NEXT:    rorxl $28, %edi, %eax
+; CHECK-BMI2-NEXT:    cmpl %eax, %edi
+; CHECK-BMI2-NEXT:    sete %al
+; CHECK-BMI2-NEXT:    retq
+  %and = and i32 %x, 268435455
+  %sh = lshr i32 %x, 4
+  %r = icmp eq i32 %and, %sh
+  ret i1 %r
+}
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK-AVX: {{.*}}
+; CHECK-NOBMI-SSE2: {{.*}}


