[llvm] [X86] Use RORX over SHR imm (PR #77964)

Bryce Wilson via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 12 13:59:33 PST 2024


https://github.com/Bryce-MW updated https://github.com/llvm/llvm-project/pull/77964

>From d8aab1f0f44abe404e1c183198ed93a59e362821 Mon Sep 17 00:00:00 2001
From: Bryce Wilson <bryce at brycemw.ca>
Date: Fri, 12 Jan 2024 11:58:50 -0600
Subject: [PATCH 1/4] [X86] Use RORX over SHR imm

SHRX is already preferred over SHR to avoid clobbering EFLAGS, but only for variable shifts. If the output of SHR is being truncated, and the immediate shift amount is at most the width of the source register minus the width of the result, RORX can be used instead: in that case the low bits of a right rotate and a right shift agree. The most common case is extracting the top half of a register; it could also apply when extracting a byte from a larger register.

I am new to TableGen, so I am sure this is not being done in the best way.

As far as I can tell, RORX has the same performance characteristics as SHR, apart from not touching EFLAGS.
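
The shift-count bound encoded by the `immle*` predicates added below is what makes the substitution sound: as long as the shift is at most (source width) minus (result width), only bits that the truncation discards differ between a shift and a rotate. A quick standalone sanity-check of that property for the 32-bit-to-16-bit case (not part of the patch, just a sketch):
```c
#include <assert.h>
#include <stdint.h>

/* Rotate right by s (0 <= s < 32); the "& 31" keeps the shift count in range
   and avoids undefined behaviour when s == 0. */
static uint32_t rotr32(uint32_t x, unsigned s) {
  return (x >> s) | (x << ((32 - s) & 31));
}

int main(void) {
  uint32_t samples[] = {0x0u, 0x1u, 0xdeadbeefu, 0xffffffffu, 0x8000ffffu};
  /* For any shift count s <= 32 - 16, the low 16 bits of a right shift and of
     a right rotate agree, so RORX can stand in for SHR when only the
     truncated result is consumed. */
  for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
    for (unsigned s = 0; s <= 32 - 16; s++)
      assert((uint16_t)(samples[i] >> s) == (uint16_t)rotr32(samples[i], s));
  return 0;
}
```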

The following example was my motivation for doing this:
```c
#include <immintrin.h>

unsigned short checksum(const int* data) {
  const int len = 5;
  unsigned out = data[0];
  unsigned int carry = 0;

  #pragma clang loop unroll(enable)
  for (unsigned int i = 1; i < len; i++) {
    out = __builtin_addc(out, data[i], carry, &carry);
  }
  out = __builtin_addcs((unsigned short)out, (unsigned short)(out >> 16), (unsigned short)carry, (unsigned short*)&carry);
  out += carry;
  return ~(unsigned short)out;
}
```
Currently produces:
```asm
checksum:
        mov     ecx, dword ptr [rdi]
        add     ecx, dword ptr [rdi + 4]
        adc     ecx, dword ptr [rdi + 8]
        adc     ecx, dword ptr [rdi + 12]
        adc     ecx, dword ptr [rdi + 16]
        setb    dl
        mov     eax, ecx
        shr     eax, 16
        add     dl, 255
        adc     ax, cx
        adc     ax, 0
        not     eax
        ret
```
With these changes, RORX leaves EFLAGS intact, so the carry from the final `adc` survives the extraction and the `setb`/`add dl, 255` save/restore of the carry flag disappears:
```asm
checksum:
        mov     ecx, dword ptr [rdi]
        add     ecx, dword ptr [rdi + 4]
        adc     ecx, dword ptr [rdi + 8]
        adc     ecx, dword ptr [rdi + 12]
        adc     ecx, dword ptr [rdi + 16]
        rorx    eax, ecx, 16
        adc     ax, cx
        adc     ax, 0
        not     eax
        ret
```
---
 llvm/lib/Target/X86/X86InstrShiftRotate.td | 78 ++++++++++++++++++++++
 1 file changed, 78 insertions(+)

diff --git a/llvm/lib/Target/X86/X86InstrShiftRotate.td b/llvm/lib/Target/X86/X86InstrShiftRotate.td
index f951894db1890c..c9e7e1a6eae68b 100644
--- a/llvm/lib/Target/X86/X86InstrShiftRotate.td
+++ b/llvm/lib/Target/X86/X86InstrShiftRotate.td
@@ -879,6 +879,26 @@ let Predicates = [HasBMI2, HasEGPR, In64BitMode] in {
   defm SHLX64 : bmi_shift<"shlx{q}", GR64, i64mem, "_EVEX">, T8, PD, REX_W, EVEX;
 }
 
+
+def immle16_8 : ImmLeaf<i8, [{
+  return Imm <= 16 - 8;
+}]>;
+def immle32_8 : ImmLeaf<i8, [{
+  return Imm <= 32 - 8;
+}]>;
+def immle64_8 : ImmLeaf<i8, [{
+  return Imm <= 64 - 8;
+}]>;
+def immle32_16 : ImmLeaf<i8, [{
+  return Imm <= 32 - 16;
+}]>;
+def immle64_16 : ImmLeaf<i8, [{
+  return Imm <= 64 - 16;
+}]>;
+def immle64_32 : ImmLeaf<i8, [{
+  return Imm <= 64 - 32;
+}]>;
+
 let Predicates = [HasBMI2] in {
   // Prefer RORX which is non-destructive and doesn't update EFLAGS.
   let AddedComplexity = 10 in {
@@ -891,6 +911,64 @@ let Predicates = [HasBMI2] in {
               (RORX32ri GR32:$src, (ROT32L2R_imm8 imm:$shamt))>;
     def : Pat<(rotl GR64:$src, (i8 imm:$shamt)),
               (RORX64ri GR64:$src, (ROT64L2R_imm8 imm:$shamt))>;
+
+    // A right shift by less than a smaller register size that is then
+    // truncated to that register size can be replaced by RORX to
+    // preserve flags with the same execution cost
+
+    def : Pat<(i8 (trunc (srl GR16:$src, (i8 immle16_8:$shamt)))),
+              (EXTRACT_SUBREG (RORX32ri (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit), imm:$shamt), sub_8bit)>;
+    def : Pat<(i8 (trunc (sra GR16:$src, (i8 immle16_8:$shamt)))),
+              (EXTRACT_SUBREG (RORX32ri (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit), imm:$shamt), sub_8bit)>;
+    def : Pat<(i8 (trunc (srl GR32:$src, (i8 immle32_8:$shamt)))),
+              (EXTRACT_SUBREG (RORX32ri GR32:$src, imm:$shamt), sub_8bit)>;
+    def : Pat<(i8 (trunc (sra GR32:$src, (i8 immle32_8:$shamt)))),
+              (EXTRACT_SUBREG (RORX32ri GR32:$src, imm:$shamt), sub_8bit)>;
+    def : Pat<(i8 (trunc (srl GR64:$src, (i8 immle64_8:$shamt)))),
+              (EXTRACT_SUBREG (RORX32ri (EXTRACT_SUBREG GR64:$src, sub_32bit), imm:$shamt), sub_8bit)>;
+    def : Pat<(i8 (trunc (sra GR64:$src, (i8 immle64_8:$shamt)))),
+              (EXTRACT_SUBREG (RORX32ri (EXTRACT_SUBREG GR64:$src, sub_32bit), imm:$shamt), sub_8bit)>;
+
+
+    def : Pat<(i16 (trunc (srl GR32:$src, (i8 immle32_16:$shamt)))),
+              (EXTRACT_SUBREG (RORX32ri GR32:$src, imm:$shamt), sub_16bit)>;
+    def : Pat<(i16 (trunc (sra GR32:$src, (i8 immle32_16:$shamt)))),
+              (EXTRACT_SUBREG (RORX32ri GR32:$src, imm:$shamt), sub_16bit)>;
+    def : Pat<(i16 (trunc (srl GR64:$src, (i8 immle64_16:$shamt)))),
+              (EXTRACT_SUBREG (RORX32ri (EXTRACT_SUBREG GR64:$src, sub_32bit), imm:$shamt), sub_16bit)>;
+    def : Pat<(i16 (trunc (sra GR64:$src, (i8 immle64_16:$shamt)))),
+              (EXTRACT_SUBREG (RORX32ri (EXTRACT_SUBREG GR64:$src, sub_32bit), imm:$shamt), sub_16bit)>;
+
+    def : Pat<(i32 (trunc (srl GR64:$src, (i8 immle64_32:$shamt)))),
+              (EXTRACT_SUBREG (RORX64ri GR64:$src, imm:$shamt), sub_32bit)>;
+    def : Pat<(i32 (trunc (sra GR64:$src, (i8 immle64_32:$shamt)))),
+              (EXTRACT_SUBREG (RORX64ri GR64:$src, imm:$shamt), sub_32bit)>;
+
+
+    // Can't expand the load
+    def : Pat<(i8 (trunc (srl (loadi32 addr:$src), (i8 immle32_8:$shamt)))),
+              (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_8bit)>;
+    def : Pat<(i8 (trunc (sra (loadi32 addr:$src), (i8 immle32_8:$shamt)))),
+              (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_8bit)>;
+    def : Pat<(i8 (trunc (srl (loadi64 addr:$src), (i8 immle64_8:$shamt)))),
+              (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_8bit)>;
+    def : Pat<(i8 (trunc (sra (loadi64 addr:$src), (i8 immle64_8:$shamt)))),
+              (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_8bit)>;
+
+
+    def : Pat<(i16 (trunc (srl (loadi32 addr:$src), (i8 immle32_16:$shamt)))),
+              (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_16bit)>;
+    def : Pat<(i16 (trunc (sra (loadi32 addr:$src), (i8 immle32_16:$shamt)))),
+              (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_16bit)>;
+    def : Pat<(i16 (trunc (srl (loadi64 addr:$src), (i8 immle64_16:$shamt)))),
+              (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_16bit)>;
+    def : Pat<(i16 (trunc (sra (loadi64 addr:$src), (i8 immle64_16:$shamt)))),
+              (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_16bit)>;
+
+    def : Pat<(i32 (trunc (srl (loadi64 addr:$src), (i8 immle64_32:$shamt)))),
+              (EXTRACT_SUBREG (RORX64mi addr:$src, imm:$shamt), sub_32bit)>;
+    def : Pat<(i32 (trunc (sra (loadi64 addr:$src), (i8 immle64_32:$shamt)))),
+              (EXTRACT_SUBREG (RORX64mi addr:$src, imm:$shamt), sub_32bit)>;
   }
 
   def : Pat<(rotr (loadi32 addr:$src), (i8 imm:$shamt)),

>From 6459c1dfc4f131b6a4cc1f559cf80667e6e4fede Mon Sep 17 00:00:00 2001
From: Bryce Wilson <bryce at brycemw.ca>
Date: Fri, 12 Jan 2024 15:57:03 -0600
Subject: [PATCH 2/4] [X86] Fix RORX patterns

---
 llvm/lib/Target/X86/X86InstrShiftRotate.td | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/llvm/lib/Target/X86/X86InstrShiftRotate.td b/llvm/lib/Target/X86/X86InstrShiftRotate.td
index c9e7e1a6eae68b..238e8e9b6e97f3 100644
--- a/llvm/lib/Target/X86/X86InstrShiftRotate.td
+++ b/llvm/lib/Target/X86/X86InstrShiftRotate.td
@@ -925,9 +925,9 @@ let Predicates = [HasBMI2] in {
     def : Pat<(i8 (trunc (sra GR32:$src, (i8 immle32_8:$shamt)))),
               (EXTRACT_SUBREG (RORX32ri GR32:$src, imm:$shamt), sub_8bit)>;
     def : Pat<(i8 (trunc (srl GR64:$src, (i8 immle64_8:$shamt)))),
-              (EXTRACT_SUBREG (RORX32ri (EXTRACT_SUBREG GR64:$src, sub_32bit), imm:$shamt), sub_8bit)>;
+              (EXTRACT_SUBREG (RORX64ri GR64:$src, imm:$shamt), sub_8bit)>;
     def : Pat<(i8 (trunc (sra GR64:$src, (i8 immle64_8:$shamt)))),
-              (EXTRACT_SUBREG (RORX32ri (EXTRACT_SUBREG GR64:$src, sub_32bit), imm:$shamt), sub_8bit)>;
+              (EXTRACT_SUBREG (RORX64ri GR64:$src, imm:$shamt), sub_8bit)>;
 
 
     def : Pat<(i16 (trunc (srl GR32:$src, (i8 immle32_16:$shamt)))),
@@ -935,9 +935,9 @@ let Predicates = [HasBMI2] in {
     def : Pat<(i16 (trunc (sra GR32:$src, (i8 immle32_16:$shamt)))),
               (EXTRACT_SUBREG (RORX32ri GR32:$src, imm:$shamt), sub_16bit)>;
     def : Pat<(i16 (trunc (srl GR64:$src, (i8 immle64_16:$shamt)))),
-              (EXTRACT_SUBREG (RORX32ri (EXTRACT_SUBREG GR64:$src, sub_32bit), imm:$shamt), sub_16bit)>;
+              (EXTRACT_SUBREG (RORX64ri GR64:$src, imm:$shamt), sub_16bit)>;
     def : Pat<(i16 (trunc (sra GR64:$src, (i8 immle64_16:$shamt)))),
-              (EXTRACT_SUBREG (RORX32ri (EXTRACT_SUBREG GR64:$src, sub_32bit), imm:$shamt), sub_16bit)>;
+              (EXTRACT_SUBREG (RORX64ri GR64:$src, imm:$shamt), sub_16bit)>;
 
     def : Pat<(i32 (trunc (srl GR64:$src, (i8 immle64_32:$shamt)))),
               (EXTRACT_SUBREG (RORX64ri GR64:$src, imm:$shamt), sub_32bit)>;
@@ -951,9 +951,9 @@ let Predicates = [HasBMI2] in {
     def : Pat<(i8 (trunc (sra (loadi32 addr:$src), (i8 immle32_8:$shamt)))),
               (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_8bit)>;
     def : Pat<(i8 (trunc (srl (loadi64 addr:$src), (i8 immle64_8:$shamt)))),
-              (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_8bit)>;
+              (EXTRACT_SUBREG (RORX64mi addr:$src, imm:$shamt), sub_8bit)>;
     def : Pat<(i8 (trunc (sra (loadi64 addr:$src), (i8 immle64_8:$shamt)))),
-              (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_8bit)>;
+              (EXTRACT_SUBREG (RORX64mi addr:$src, imm:$shamt), sub_8bit)>;
 
 
     def : Pat<(i16 (trunc (srl (loadi32 addr:$src), (i8 immle32_16:$shamt)))),
@@ -961,9 +961,9 @@ let Predicates = [HasBMI2] in {
     def : Pat<(i16 (trunc (sra (loadi32 addr:$src), (i8 immle32_16:$shamt)))),
               (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_16bit)>;
     def : Pat<(i16 (trunc (srl (loadi64 addr:$src), (i8 immle64_16:$shamt)))),
-              (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_16bit)>;
+              (EXTRACT_SUBREG (RORX64mi addr:$src, imm:$shamt), sub_16bit)>;
     def : Pat<(i16 (trunc (sra (loadi64 addr:$src), (i8 immle64_16:$shamt)))),
-              (EXTRACT_SUBREG (RORX32mi addr:$src, imm:$shamt), sub_16bit)>;
+              (EXTRACT_SUBREG (RORX64mi addr:$src, imm:$shamt), sub_16bit)>;
 
     def : Pat<(i32 (trunc (srl (loadi64 addr:$src), (i8 immle64_32:$shamt)))),
               (EXTRACT_SUBREG (RORX64mi addr:$src, imm:$shamt), sub_32bit)>;

>From 6202a22c0deee13fe40b1ff9730b1c5642289bd1 Mon Sep 17 00:00:00 2001
From: Bryce Wilson <bryce at brycemw.ca>
Date: Fri, 12 Jan 2024 15:58:30 -0600
Subject: [PATCH 3/4] Update atomic-unordered.ll

---
 llvm/test/CodeGen/X86/atomic-unordered.ll | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/llvm/test/CodeGen/X86/atomic-unordered.ll b/llvm/test/CodeGen/X86/atomic-unordered.ll
index df123be53474f0..c867817fd2dfff 100644
--- a/llvm/test/CodeGen/X86/atomic-unordered.ll
+++ b/llvm/test/CodeGen/X86/atomic-unordered.ll
@@ -2062,8 +2062,7 @@ define i32 @split_load(ptr %p) {
 ; CHECK-O3-LABEL: split_load:
 ; CHECK-O3:       # %bb.0:
 ; CHECK-O3-NEXT:    movq (%rdi), %rax
-; CHECK-O3-NEXT:    movq %rax, %rcx
-; CHECK-O3-NEXT:    shrq $32, %rcx
+; CHECK-O3-NEXT:    rorxq $32, %rax, %rcx
 ; CHECK-O3-NEXT:    orl %eax, %ecx
 ; CHECK-O3-NEXT:    movzbl %cl, %eax
 ; CHECK-O3-NEXT:    retq

>From dcd7e86bb27a7acbe5477e730dd63202527adc10 Mon Sep 17 00:00:00 2001
From: Bryce Wilson <bryce at brycemw.ca>
Date: Fri, 12 Jan 2024 15:59:25 -0600
Subject: [PATCH 4/4] Update bmi2.ll

---
 llvm/test/CodeGen/X86/bmi2.ll | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/llvm/test/CodeGen/X86/bmi2.ll b/llvm/test/CodeGen/X86/bmi2.ll
index 24e38cfeb704df..e81434b35096ac 100644
--- a/llvm/test/CodeGen/X86/bmi2.ll
+++ b/llvm/test/CodeGen/X86/bmi2.ll
@@ -310,8 +310,7 @@ define i32 @mulx32(i32 %x, i32 %y, ptr %p)   {
 ; X64-NEXT:    addl %edi, %edi
 ; X64-NEXT:    leal (%rsi,%rsi), %eax
 ; X64-NEXT:    imulq %rdi, %rax
-; X64-NEXT:    movq %rax, %rcx
-; X64-NEXT:    shrq $32, %rcx
+; X64-NEXT:    rorxq $32, %rax, %rcx
 ; X64-NEXT:    movl %ecx, (%rdx)
 ; X64-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-NEXT:    retq
@@ -344,8 +343,7 @@ define i32 @mulx32_load(i32 %x, ptr %y, ptr %p)   {
 ; X64-NEXT:    leal (%rdi,%rdi), %eax
 ; X64-NEXT:    movl (%rsi), %ecx
 ; X64-NEXT:    imulq %rcx, %rax
-; X64-NEXT:    movq %rax, %rcx
-; X64-NEXT:    shrq $32, %rcx
+; X64-NEXT:    rorxq $32, %rax, %rcx
 ; X64-NEXT:    movl %ecx, (%rdx)
 ; X64-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-NEXT:    retq
