[llvm] [X86][CodeGen] Support long instruction fixup for APX NDD instructions (PR #83578)

Shengchen Kan via llvm-commits llvm-commits at lists.llvm.org
Sat Mar 2 07:32:40 PST 2024


https://github.com/KanRobert updated https://github.com/llvm/llvm-project/pull/83578

>From d6acac33b7ada66b72d3f9adbfd597fb0aacaa08 Mon Sep 17 00:00:00 2001
From: Shengchen Kan <shengchen.kan at intel.com>
Date: Fri, 1 Mar 2024 10:04:29 +0800
Subject: [PATCH 1/5] [X86][CodeGen] Support long instruction fixup for APX NDD
 instructions

RFC: https://discourse.llvm.org/t/rfc-support-long-instruction-fixup-for-x86/76539
---
 .../lib/Target/X86/MCTargetDesc/X86BaseInfo.h |  29 +++
 .../X86/MCTargetDesc/X86MCCodeEmitter.cpp     |  13 +-
 llvm/lib/Target/X86/X86ExpandPseudo.cpp       |  81 +++++++
 .../CodeGen/X86/apx/long-instruction-fixup.ll | 212 ++++++++++++++++++
 4 files changed, 324 insertions(+), 11 deletions(-)
 create mode 100644 llvm/test/CodeGen/X86/apx/long-instruction-fixup.ll

diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
index 4442b80861b61a..34cec486b85386 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
@@ -1315,6 +1315,35 @@ inline bool isKMasked(uint64_t TSFlags) {
 inline bool isKMergeMasked(uint64_t TSFlags) {
   return isKMasked(TSFlags) && (TSFlags & X86II::EVEX_Z) == 0;
 }
+
+/// \returns true if the instruction needs a SIB.
+inline bool needSIB(unsigned BaseReg, unsigned IndexReg, bool In64BitMode) {
+  // The SIB byte must be used if there is an index register.
+  if (IndexReg)
+    return true;
+
+  // The SIB byte must be used if the base is ESP/RSP/R12/R20/R28, all of
+  // which encode to an R/M value of 4, which indicates that a SIB byte is
+  // present.
+  switch (BaseReg) {
+  default:
+    break;
+  case X86::ESP:
+  case X86::RSP:
+  case X86::R12:
+  case X86::R12D:
+  case X86::R20:
+  case X86::R20D:
+  case X86::R28:
+  case X86::R28D:
+    return true;
+  }
+
+  // If there is no base register and we're in 64-bit mode, we need a SIB
+  // byte to emit an addr that is just 'disp32' (the non-RIP relative form).
+  return In64BitMode && !BaseReg;
+}
+
 } // namespace X86II
 } // namespace llvm
 #endif
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
index f7c361393fea62..fdb11d1a408bb6 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
@@ -753,17 +753,8 @@ void X86MCCodeEmitter::emitMemModRMByte(
   bool AllowDisp8 = !UseDisp32;
 
   // Determine whether a SIB byte is needed.
-  if ( // The SIB byte must be used if there is an index register or the
-       // encoding requires a SIB byte.
-      !ForceSIB && IndexReg.getReg() == 0 &&
-      // The SIB byte must be used if the base is ESP/RSP/R12/R20/R28, all of
-      // which encode to an R/M value of 4, which indicates that a SIB byte is
-      // present.
-      BaseRegNo != N86::ESP &&
-      // If there is no base register and we're in 64-bit mode, we need a SIB
-      // byte to emit an addr that is just 'disp32' (the non-RIP relative form).
-      (!STI.hasFeature(X86::Is64Bit) || BaseReg != 0)) {
-
+  if (!ForceSIB && !X86II::needSIB(BaseReg, IndexReg.getReg(),
+                                   STI.hasFeature(X86::Is64Bit))) {
     if (BaseReg == 0) { // [disp32]     in X86-32 mode
       emitByte(modRMByte(0, RegOpcodeField, 5), CB);
       emitImmediate(Disp, MI.getLoc(), 4, FK_Data_4, StartByte, CB, Fixups);
diff --git a/llvm/lib/Target/X86/X86ExpandPseudo.cpp b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
index b9fb3fdb239eef..e25c74707d6ad4 100644
--- a/llvm/lib/Target/X86/X86ExpandPseudo.cpp
+++ b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
@@ -613,6 +613,87 @@ bool X86ExpandPseudo::expandMI(MachineBasicBlock &MBB,
   case X86::CALL64m_RVMARKER:
     expandCALL_RVMARKER(MBB, MBBI);
     return true;
+  case X86::ADD32mi_ND:
+  case X86::ADD64mi32_ND:
+  case X86::SUB32mi_ND:
+  case X86::SUB64mi32_ND:
+  case X86::AND32mi_ND:
+  case X86::AND64mi32_ND:
+  case X86::OR32mi_ND:
+  case X86::OR64mi32_ND:
+  case X86::XOR32mi_ND:
+  case X86::XOR64mi32_ND:
+  case X86::ADC32mi_ND:
+  case X86::ADC64mi32_ND:
+  case X86::SBB32mi_ND:
+  case X86::SBB64mi32_ND: {
+    // It's possible for an EVEX-encoded legacy instruction to reach the 15-byte
+    // instruction length limit: 4 bytes of EVEX prefix + 1 byte of opcode + 1
+    // byte of ModRM + 1 byte of SIB + 4 bytes of displacement + 4 bytes of
+    // immediate = 15 bytes in total, e.g.
+    //
+    //  addq    $184, -96, %rax
+    //
+    // In such a case, no additional segment override prefix can be used. To
+    // resolve the issue, we split the "long" instruction into 2 instructions:
+    //
+    //  subq    $184, %fs:257(%rbx, %rcx), %rax
+    //
+    //  ->
+    //
+    //  movq %fs:257(%rbx, %rcx),%rax
+    //  subq $184, %rax
+    int MemOpNo = X86::getFirstAddrOperandIdx(MI);
+    Register Segment = MI.getOperand(MemOpNo + X86::AddrSegmentReg).getReg();
+    if (Segment == X86::NoRegister)
+      return false;
+    const MachineOperand &ImmOp =
+        MI.getOperand(MI.getNumExplicitOperands() - 1);
+    // If the immediate is an expr, conservatively estimate 4 bytes.
+    if (ImmOp.isImm() && isInt<8>(ImmOp.getImm()))
+      return false;
+    Register Base = MI.getOperand(MemOpNo + X86::AddrBaseReg).getReg();
+    Register Index = MI.getOperand(MemOpNo + X86::AddrIndexReg).getReg();
+    if (!X86II::needSIB(Base, Index, /*In64BitMode=*/true))
+      return false;
+    const MachineOperand &DispOp = MI.getOperand(MemOpNo + X86::AddrDisp);
+    // If the displacement is an expr, conservatively estimate 4 bytes.
+    if (DispOp.isImm() && isInt<8>(DispOp.getImm()))
+      return false;
+    unsigned Opc, LoadOpc;
+    switch (Opcode) {
+#define MI_TO_RI(OP)                                                           \
+  case X86::OP##32mi_ND:                                                       \
+    Opc = X86::OP##32ri;                                                       \
+    LoadOpc = X86::MOV32rm;                                                    \
+    break;                                                                     \
+  case X86::OP##64mi32_ND:                                                     \
+    Opc = X86::OP##64ri32;                                                     \
+    LoadOpc = X86::MOV64rm;                                                    \
+    break;
+
+    default:
+      llvm_unreachable("Unexpected Opcode");
+      MI_TO_RI(ADD);
+      MI_TO_RI(SUB);
+      MI_TO_RI(AND);
+      MI_TO_RI(OR);
+      MI_TO_RI(XOR);
+      MI_TO_RI(ADC);
+      MI_TO_RI(SBB);
+#undef MI_TO_RI
+    }
+    // Insert OPri.
+    Register DestReg = MI.getOperand(0).getReg();
+    BuildMI(MBB, std::next(MBBI), DL, TII->get(Opc), DestReg)
+        .addReg(DestReg)
+        .add(ImmOp);
+    // Change OPmi_ND to MOVrm.
+    for (unsigned I = MI.getNumImplicitOperands() + 1; I != 0; --I)
+      MI.removeOperand(MI.getNumOperands() - 1);
+    MI.setDesc(TII->get(LoadOpc));
+    return true;
+  }
   }
   llvm_unreachable("Previous switch has a fallthrough?");
 }
diff --git a/llvm/test/CodeGen/X86/apx/long-instruction-fixup.ll b/llvm/test/CodeGen/X86/apx/long-instruction-fixup.ll
new file mode 100644
index 00000000000000..a253b81415bd51
--- /dev/null
+++ b/llvm/test/CodeGen/X86/apx/long-instruction-fixup.ll
@@ -0,0 +1,212 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ndd -verify-machineinstrs | FileCheck %s
+
+define i32 @add32mi_GS() {
+; CHECK-LABEL: add32mi_GS:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl %gs:255, %eax
+; CHECK-NEXT:    addl $123456, %eax # imm = 0x1E240
+; CHECK-NEXT:    retq
+entry:
+  %a= inttoptr i32 255 to ptr addrspace(256)
+  %t = load i32, ptr addrspace(256) %a
+  %add = add i32 %t, 123456
+  ret i32 %add
+}
+
+define i64 @add64mi_FS() {
+; CHECK-LABEL: add64mi_FS:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movq %fs:255, %rax
+; CHECK-NEXT:    addq $123456, %rax # imm = 0x1E240
+; CHECK-NEXT:    retq
+entry:
+  %a= inttoptr i64 255 to ptr addrspace(257)
+  %t = load i64, ptr addrspace(257) %a
+  %add = add i64 %t, 123456
+  ret i64 %add
+}
+
+define i32 @sub32mi_GS() {
+; CHECK-LABEL: sub32mi_GS:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl %gs:255, %eax
+; CHECK-NEXT:    addl $129, %eax
+; CHECK-NEXT:    retq
+entry:
+  %a= inttoptr i32 255 to ptr addrspace(256)
+  %t = load i32, ptr addrspace(256) %a
+  %sub = sub i32 %t, -129
+  ret i32 %sub
+}
+
+define i64 @sub64mi_FS() {
+; CHECK-LABEL: sub64mi_FS:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movq %fs:255, %rax
+; CHECK-NEXT:    subq $-2147483648, %rax # imm = 0x80000000
+; CHECK-NEXT:    retq
+entry:
+  %a= inttoptr i64 255 to ptr addrspace(257)
+  %t = load i64, ptr addrspace(257) %a
+  %sub = sub i64 %t, -2147483648
+  ret i64 %sub
+}
+
+define i32 @and32mi_GS() {
+; CHECK-LABEL: and32mi_GS:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl %gs:255, %eax
+; CHECK-NEXT:    andl $-129, %eax
+; CHECK-NEXT:    retq
+entry:
+  %a= inttoptr i32 255 to ptr addrspace(256)
+  %t = load i32, ptr addrspace(256) %a
+  %and = and i32 %t, -129
+  ret i32 %and
+}
+
+define i64 @and64mi_FS() {
+; CHECK-LABEL: and64mi_FS:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movq %fs:255, %rax
+; CHECK-NEXT:    andq $-2147483648, %rax # imm = 0x80000000
+; CHECK-NEXT:    retq
+entry:
+  %a= inttoptr i64 255 to ptr addrspace(257)
+  %t = load i64, ptr addrspace(257) %a
+  %and = and i64 %t, -2147483648
+  ret i64 %and
+}
+
+define i32 @or32mi_GS() {
+; CHECK-LABEL: or32mi_GS:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl %gs:255, %eax
+; CHECK-NEXT:    orl $-129, %eax
+; CHECK-NEXT:    retq
+entry:
+  %a= inttoptr i32 255 to ptr addrspace(256)
+  %t = load i32, ptr addrspace(256) %a
+  %or = or i32 %t, -129
+  ret i32 %or
+}
+
+define i64 @or64mi_FS() {
+; CHECK-LABEL: or64mi_FS:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movq %fs:255, %rax
+; CHECK-NEXT:    orq $-2147483648, %rax # imm = 0x80000000
+; CHECK-NEXT:    retq
+entry:
+  %a= inttoptr i64 255 to ptr addrspace(257)
+  %t = load i64, ptr addrspace(257) %a
+  %or = or i64 %t, -2147483648
+  ret i64 %or
+}
+
+define i32 @xor32mi_GS() {
+; CHECK-LABEL: xor32mi_GS:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl %gs:255, %eax
+; CHECK-NEXT:    xorl $-129, %eax
+; CHECK-NEXT:    retq
+entry:
+  %a= inttoptr i32 255 to ptr addrspace(256)
+  %t = load i32, ptr addrspace(256) %a
+  %xor = xor i32 %t, -129
+  ret i32 %xor
+}
+
+define i64 @xor64mi_FS() {
+; CHECK-LABEL: xor64mi_FS:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movq %fs:255, %rax
+; CHECK-NEXT:    xorq $-2147483648, %rax # imm = 0x80000000
+; CHECK-NEXT:    retq
+entry:
+  %a= inttoptr i64 255 to ptr addrspace(257)
+  %t = load i64, ptr addrspace(257) %a
+  %xor = xor i64 %t, -2147483648
+  ret i64 %xor
+}
+
+define i32 @adc32mi_GS(i32 %x, i32 %y) {
+; CHECK-LABEL: adc32mi_GS:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    cmpl %edi, %esi
+; CHECK-NEXT:    movl %gs:255, %eax
+; CHECK-NEXT:    adcl $123456, %eax # imm = 0x1E240
+; CHECK-NEXT:    retq
+entry:
+  %a = inttoptr i32 255 to ptr addrspace(256)
+  %t = load i32, ptr addrspace(256) %a
+  %s = add i32 %t, 123456
+  %k = icmp ugt i32 %x, %y
+  %z = zext i1 %k to i32
+  %r = add i32 %s, %z
+  ret i32 %r
+}
+
+define i64 @adc64mi_FS(i64 %x, i64 %y) {
+; CHECK-LABEL: adc64mi_FS:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    cmpq %rdi, %rsi
+; CHECK-NEXT:    movq %fs:255, %rax
+; CHECK-NEXT:    adcq $123456, %rax # imm = 0x1E240
+; CHECK-NEXT:    retq
+entry:
+  %a = inttoptr i64 255 to ptr addrspace(257)
+  %t = load i64, ptr addrspace(257) %a
+  %s = add i64 %t, 123456
+  %k = icmp ugt i64 %x, %y
+  %z = zext i1 %k to i64
+  %r = add i64 %s, %z
+  ret i64 %r
+}
+
+define i32 @sbb32mi_GS(i32 %x, i32 %y) {
+; CHECK-LABEL: sbb32mi_GS:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    cmpl %edi, %esi
+; CHECK-NEXT:    sbbl $0, %gs:255, %eax
+; CHECK-NEXT:    addl $-123456, %eax # imm = 0xFFFE1DC0
+; CHECK-NEXT:    retq
+entry:
+  %a= inttoptr i32 255 to ptr addrspace(256)
+  %t = load i32, ptr addrspace(256) %a
+  %s = sub i32 %t, 123456
+  %k = icmp ugt i32 %x, %y
+  %z = zext i1 %k to i32
+  %r = sub i32 %s, %z
+  ret i32 %r
+}
+
+define i64 @sbb64mi_FS(i64 %x, i64 %y) {
+; CHECK-LABEL: sbb64mi_FS:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    cmpq %rdi, %rsi
+; CHECK-NEXT:    sbbq $0, %fs:255, %rax
+; CHECK-NEXT:    addq $-123456, %rax # imm = 0xFFFE1DC0
+; CHECK-NEXT:    retq
+entry:
+  %a= inttoptr i64 255 to ptr addrspace(257)
+  %t = load i64, ptr addrspace(257) %a
+  %s = sub i64 %t, 123456
+  %k = icmp ugt i64 %x, %y
+  %z = zext i1 %k to i64
+  %r = sub i64 %s, %z
+  ret i64 %r
+}
+
+define i32 @add32mi8_GS() {
+; CHECK-LABEL: add32mi8_GS:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addl $127, %gs:255, %eax
+; CHECK-NEXT:    retq
+entry:
+  %a= inttoptr i32 255 to ptr addrspace(256)
+  %t = load i32, ptr addrspace(256) %a
+  %add = add i32 %t, 127
+  ret i32 %add
+}

>From b2d7e6d0619ad9a54f0e76a7fee49c619b869baa Mon Sep 17 00:00:00 2001
From: Shengchen Kan <shengchen.kan at intel.com>
Date: Sat, 2 Mar 2024 10:47:07 +0800
Subject: [PATCH 2/5] address review comment: simplify switch

---
 llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
index 34cec486b85386..28a067d525e010 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
@@ -1327,7 +1327,9 @@ inline bool needSIB(unsigned BaseReg, unsigned IndexReg, bool In64BitMode) {
   // present.
   switch (BaseReg) {
   default:
-    break;
+    // If there is no base register and we're in 64-bit mode, we need a SIB
+    // byte to emit an addr that is just 'disp32' (the non-RIP relative form).
+    return In64BitMode && !BaseReg;
   case X86::ESP:
   case X86::RSP:
   case X86::R12:
@@ -1338,10 +1340,6 @@ inline bool needSIB(unsigned BaseReg, unsigned IndexReg, bool In64BitMode) {
   case X86::R28D:
     return true;
   }
-
-  // If there is no base register and we're in 64-bit mode, we need a SIB
-  // byte to emit an addr that is just 'disp32' (the non-RIP relative form).
-  return In64BitMode && !BaseReg;
 }
 
 } // namespace X86II

>From 54ddcd56c4760d1197c9c8fc2a27a38be7a60d57 Mon Sep 17 00:00:00 2001
From: Shengchen Kan <shengchen.kan at intel.com>
Date: Sat, 2 Mar 2024 16:23:50 +0800
Subject: [PATCH 3/5] address review comment: handle x32

---
 llvm/lib/Target/X86/X86ExpandPseudo.cpp       | 30 +++++++++--------
 .../X86/apx/long-instruction-fixup-x32.ll     | 33 +++++++++++++++++++
 2 files changed, 49 insertions(+), 14 deletions(-)
 create mode 100644 llvm/test/CodeGen/X86/apx/long-instruction-fixup-x32.ll

diff --git a/llvm/lib/Target/X86/X86ExpandPseudo.cpp b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
index e25c74707d6ad4..3292ce25fd9ba2 100644
--- a/llvm/lib/Target/X86/X86ExpandPseudo.cpp
+++ b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
@@ -632,34 +632,36 @@ bool X86ExpandPseudo::expandMI(MachineBasicBlock &MBB,
     // byte of ModRM + 1 byte of SIB + 4 bytes of displacement + 4 bytes of
     // immediate = 15 bytes in total, e.g.
     //
-    //  addq    $184, -96, %rax
-    //
-    // In such a case, no additional segment override prefix can be used. To
-    // resolve the issue, we split the “long” instruction into 2 instructions:
-    //
     //  subq    $184, %fs:257(%rbx, %rcx), %rax
     //
-    //  ->
+    // In such a case, no additional (ADSIZE or segment override) prefix can be
+    // used. To resolve the issue, we split the "long" instruction into 2
+    // instructions:
     //
     //  movq %fs:257(%rbx, %rcx),%rax
     //  subq $184, %rax
-    int MemOpNo = X86::getFirstAddrOperandIdx(MI);
-    Register Segment = MI.getOperand(MemOpNo + X86::AddrSegmentReg).getReg();
-    if (Segment == X86::NoRegister)
-      return false;
     const MachineOperand &ImmOp =
         MI.getOperand(MI.getNumExplicitOperands() - 1);
     // If the immediate is an expr, conservatively estimate 4 bytes.
     if (ImmOp.isImm() && isInt<8>(ImmOp.getImm()))
       return false;
-    Register Base = MI.getOperand(MemOpNo + X86::AddrBaseReg).getReg();
-    Register Index = MI.getOperand(MemOpNo + X86::AddrIndexReg).getReg();
-    if (!X86II::needSIB(Base, Index, /*In64BitMode=*/true))
-      return false;
+    int MemOpNo = X86::getFirstAddrOperandIdx(MI);
     const MachineOperand &DispOp = MI.getOperand(MemOpNo + X86::AddrDisp);
     // If the displacement is an expr, conservatively estimate 4 bytes.
     if (DispOp.isImm() && isInt<8>(DispOp.getImm()))
       return false;
+    // There can only be one of the three: SIB, segment override register, ADSIZE.
+    Register Segment = MI.getOperand(MemOpNo + X86::AddrSegmentReg).getReg();
+    Register Base = MI.getOperand(MemOpNo + X86::AddrBaseReg).getReg();
+    Register Index = MI.getOperand(MemOpNo + X86::AddrIndexReg).getReg();
+    unsigned Count = Segment ? 1 : 0;
+    if (X86II::needSIB(Base, Index, /*In64BitMode=*/true))
+      ++Count;
+    if (X86MCRegisterClasses[X86::GR32RegClassID].contains(Base) ||
+        X86MCRegisterClasses[X86::GR32RegClassID].contains(Index))
+      ++Count;
+    if (Count < 2)
+      return false;
     unsigned Opc, LoadOpc;
     switch (Opcode) {
 #define MI_TO_RI(OP)                                                           \
diff --git a/llvm/test/CodeGen/X86/apx/long-instruction-fixup-x32.ll b/llvm/test/CodeGen/X86/apx/long-instruction-fixup-x32.ll
new file mode 100644
index 00000000000000..1f16b61f4399ff
--- /dev/null
+++ b/llvm/test/CodeGen/X86/apx/long-instruction-fixup-x32.ll
@@ -0,0 +1,33 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-pc-linux-gnux32 -mattr=+ndd -verify-machineinstrs | FileCheck %s
+
+
+define i32 @add32mi(ptr nocapture noundef readonly %a, i32 noundef %b) {
+; CHECK-LABEL: add32mi:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl 1164(%edi,%esi,4), %eax
+; CHECK-NEXT:    addl $4660, %eax # imm = 0x1234
+; CHECK-NEXT:    retq
+entry:
+  %add.ptr = getelementptr inbounds i32, ptr %a, i32 %b
+  %add.ptr1 = getelementptr inbounds i8, ptr %add.ptr, i32 1164
+  %0 = load i32, ptr %add.ptr1, align 4
+  %add = add nsw i32 %0, 4660
+  ret i32 %add
+}
+
+declare ptr @llvm.thread.pointer()
+
+define i32 @add32mi_FS(i32 %i) {
+; CHECK-LABEL: add32mi_FS:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl %fs:0, %eax
+; CHECK-NEXT:    addl $4660, (%eax,%edi,4), %eax # imm = 0x1234
+; CHECK-NEXT:    retq
+entry:
+  %0 = tail call ptr @llvm.thread.pointer()
+  %arrayidx = getelementptr inbounds i32, ptr %0, i32 %i
+  %1 = load i32, ptr %arrayidx, align 4
+  %add = add nsw i32 %1, 4660
+  ret i32 %add
+}

>From b53dc69642db2d2c90757a43e87e444cf6cf168a Mon Sep 17 00:00:00 2001
From: Shengchen Kan <shengchen.kan at intel.com>
Date: Sat, 2 Mar 2024 23:15:25 +0800
Subject: [PATCH 4/5] address review comment

---
 llvm/lib/Target/X86/X86ExpandPseudo.cpp       | 10 +++--
 .../X86/apx/long-instruction-fixup-x32.ll     | 39 ++++++++++++++++---
 2 files changed, 39 insertions(+), 10 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ExpandPseudo.cpp b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
index 3292ce25fd9ba2..f4c67f115c9f3d 100644
--- a/llvm/lib/Target/X86/X86ExpandPseudo.cpp
+++ b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
@@ -640,6 +640,9 @@ bool X86ExpandPseudo::expandMI(MachineBasicBlock &MBB,
     //
     //  movq %fs:257(%rbx, %rcx),%rax
     //  subq $184, %rax
+    //
+    //  Therefore we consider the OPmi_ND to be a pseudo instruction to some
+    //  extent.
     const MachineOperand &ImmOp =
         MI.getOperand(MI.getNumExplicitOperands() - 1);
     // If the immediate is an expr, conservatively estimate 4 bytes.
@@ -647,14 +650,13 @@ bool X86ExpandPseudo::expandMI(MachineBasicBlock &MBB,
       return false;
     int MemOpNo = X86::getFirstAddrOperandIdx(MI);
     const MachineOperand &DispOp = MI.getOperand(MemOpNo + X86::AddrDisp);
+    Register Base = MI.getOperand(MemOpNo + X86::AddrBaseReg).getReg();
     // If the displacement is an expr, conservatively estimate 4 bytes.
-    if (DispOp.isImm() && isInt<8>(DispOp.getImm()))
+    if (Base && DispOp.isImm() && isInt<8>(DispOp.getImm()))
       return false;
     // There can only be one of the three: SIB, segment override register, ADSIZE.
-    Register Segment = MI.getOperand(MemOpNo + X86::AddrSegmentReg).getReg();
-    Register Base = MI.getOperand(MemOpNo + X86::AddrBaseReg).getReg();
     Register Index = MI.getOperand(MemOpNo + X86::AddrIndexReg).getReg();
-    unsigned Count = Segment ? 1 : 0;
+    unsigned Count = !!MI.getOperand(MemOpNo + X86::AddrSegmentReg).getReg();
     if (X86II::needSIB(Base, Index, /*In64BitMode=*/true))
       ++Count;
     if (X86MCRegisterClasses[X86::GR32RegClassID].contains(Base) ||
diff --git a/llvm/test/CodeGen/X86/apx/long-instruction-fixup-x32.ll b/llvm/test/CodeGen/X86/apx/long-instruction-fixup-x32.ll
index 1f16b61f4399ff..fb24704fe0491a 100644
--- a/llvm/test/CodeGen/X86/apx/long-instruction-fixup-x32.ll
+++ b/llvm/test/CodeGen/X86/apx/long-instruction-fixup-x32.ll
@@ -2,8 +2,8 @@
 ; RUN: llc < %s -mtriple=x86_64-pc-linux-gnux32 -mattr=+ndd -verify-machineinstrs | FileCheck %s
 
 
-define i32 @add32mi(ptr nocapture noundef readonly %a, i32 noundef %b) {
-; CHECK-LABEL: add32mi:
+define i32 @add32mi_SIB_ADSIZE(ptr nocapture noundef readonly %a, i32 noundef %b) {
+; CHECK-LABEL: add32mi_SIB_ADSIZE:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl 1164(%edi,%esi,4), %eax
 ; CHECK-NEXT:    addl $4660, %eax # imm = 0x1234
@@ -11,15 +11,15 @@ define i32 @add32mi(ptr nocapture noundef readonly %a, i32 noundef %b) {
 entry:
   %add.ptr = getelementptr inbounds i32, ptr %a, i32 %b
   %add.ptr1 = getelementptr inbounds i8, ptr %add.ptr, i32 1164
-  %0 = load i32, ptr %add.ptr1, align 4
+  %0 = load i32, ptr %add.ptr1
   %add = add nsw i32 %0, 4660
   ret i32 %add
 }
 
 declare ptr @llvm.thread.pointer()
 
-define i32 @add32mi_FS(i32 %i) {
-; CHECK-LABEL: add32mi_FS:
+define i32 @add32mi_FS_ADSIZE(i32 %i) {
+; CHECK-LABEL: add32mi_FS_ADSIZE:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl %fs:0, %eax
 ; CHECK-NEXT:    addl $4660, (%eax,%edi,4), %eax # imm = 0x1234
@@ -27,7 +27,34 @@ define i32 @add32mi_FS(i32 %i) {
 entry:
   %0 = tail call ptr @llvm.thread.pointer()
   %arrayidx = getelementptr inbounds i32, ptr %0, i32 %i
-  %1 = load i32, ptr %arrayidx, align 4
+  %1 = load i32, ptr %arrayidx
   %add = add nsw i32 %1, 4660
   ret i32 %add
 }
+
+define i32 @add32mi_FS_SIB(i32 %i) {
+; CHECK-LABEL: add32mi_FS_SIB:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl %fs:0, %eax
+; CHECK-NEXT:    addl $4660, %eax # imm = 0x1234
+; CHECK-NEXT:    retq
+entry:
+  %0 = tail call ptr @llvm.thread.pointer()
+  %arrayidx = getelementptr inbounds i32, ptr %0, i32 0
+  %1 = load i32, ptr %arrayidx
+  %add = add nsw i32 %1, 4660
+  ret i32 %add
+}
+
+define i32 @add32mi_GS_ADSIZE(ptr addrspace(256) %a) {
+; CHECK-LABEL: add32mi_GS_ADSIZE:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl %gs:4936(%edi), %eax
+; CHECK-NEXT:    addl $123456, %eax # imm = 0x1E240
+; CHECK-NEXT:    retq
+entry:
+  %arrayidx = getelementptr inbounds i32, ptr addrspace(256) %a, i32 1234
+  %t = load i32, ptr addrspace(256) %arrayidx
+  %add = add i32 %t, 123456
+  ret i32 %add
+}

>From ab2c6eaff4351fbfe84ea025ef54f7f94b8df4ae Mon Sep 17 00:00:00 2001
From: Shengchen Kan <shengchen.kan at intel.com>
Date: Sat, 2 Mar 2024 23:32:26 +0800
Subject: [PATCH 5/5] add more test

---
 .../CodeGen/X86/apx/long-instruction-fixup.ll | 42 +++++++++++++++++++
 1 file changed, 42 insertions(+)

diff --git a/llvm/test/CodeGen/X86/apx/long-instruction-fixup.ll b/llvm/test/CodeGen/X86/apx/long-instruction-fixup.ll
index a253b81415bd51..30c485836797f5 100644
--- a/llvm/test/CodeGen/X86/apx/long-instruction-fixup.ll
+++ b/llvm/test/CodeGen/X86/apx/long-instruction-fixup.ll
@@ -210,3 +210,45 @@ entry:
   %add = add i32 %t, 127
   ret i32 %add
 }
+
+define i32 @add32mi_GS_Disp0(ptr addrspace(256) %a, i32 %i) {
+; CHECK-LABEL: add32mi_GS_Disp0:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movslq %esi, %rax
+; CHECK-NEXT:    addl $123456, %gs:(%rdi,%rax,4), %eax # imm = 0x1E240
+; CHECK-NEXT:    retq
+entry:
+  %arrayidx = getelementptr inbounds i32, ptr addrspace(256) %a, i32 %i
+  %t = load i32, ptr addrspace(256) %arrayidx
+  %add = add i32 %t, 123456
+  ret i32 %add
+}
+
+define i32 @add32mi_GS_Disp8(ptr addrspace(256) %a, i32 %i) {
+; CHECK-LABEL: add32mi_GS_Disp8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movslq %esi, %rax
+; CHECK-NEXT:    addl $123456, %gs:123(%rdi,%rax,4), %eax # imm = 0x1E240
+; CHECK-NEXT:    retq
+entry:
+  %arrayidx = getelementptr inbounds i32, ptr addrspace(256) %a, i32 %i
+  %add.ptr = getelementptr inbounds i8, ptr addrspace(256) %arrayidx, i32 123
+  %t = load i32, ptr addrspace(256) %add.ptr
+  %add = add i32 %t, 123456
+  ret i32 %add
+}
+
+define i32 @add32mi_GS_Disp32(ptr addrspace(256) %a, i32 %i) {
+; CHECK-LABEL: add32mi_GS_Disp32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movslq %esi, %rax
+; CHECK-NEXT:    movl %gs:1234(%rdi,%rax,4), %eax
+; CHECK-NEXT:    addl $123456, %eax # imm = 0x1E240
+; CHECK-NEXT:    retq
+entry:
+  %arrayidx = getelementptr inbounds i32, ptr addrspace(256) %a, i32 %i
+  %add.ptr = getelementptr inbounds i8, ptr addrspace(256) %arrayidx, i32 1234
+  %t = load i32, ptr addrspace(256) %add.ptr
+  %add = add i32 %t, 123456
+  ret i32 %add
+}



More information about the llvm-commits mailing list