[llvm] [X86] Add missing immediate qualifier to the (V)INSERTPS instruction names (PR #108568)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Sep 13 10:06:59 PDT 2024


https://github.com/RKSimon updated https://github.com/llvm/llvm-project/pull/108568

>From 4a0ca2039c8d6868032d13990355310bde5277ec Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Fri, 13 Sep 2024 15:26:09 +0100
Subject: [PATCH 1/2] [X86] Add missing immediate qualifier to the (V)INSERTPS
 instruction names

This matches (V)BLENDPS etc. and makes it easier to algorithmically recreate the instruction name in various analysis scripts I'm working on.
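
The point of the uniform rri/rmi suffix is that an opcode name can be rebuilt mechanically from the mnemonic and the operand form. A minimal standalone sketch of that idea (the helper below is hypothetical, not part of this patch or of LLVM):

#include <iostream>
#include <string>

// Hypothetical helper: with a consistent suffix scheme, a script can build
// names such as "VINSERTPSZrri" from the mnemonic and the operand form.
std::string buildOpcodeName(const std::string &Mnemonic, bool IsEVEX,
                            bool HasMemOperand, bool HasImm) {
  std::string Name = Mnemonic;         // e.g. "VINSERTPS"
  if (IsEVEX)
    Name += "Z";                       // EVEX-encoded (AVX-512) variant
  Name += HasMemOperand ? "rm" : "rr"; // reg/reg vs reg/mem operands
  if (HasImm)
    Name += "i";                       // trailing immediate operand
  return Name;
}

int main() {
  // Prints "VINSERTPSZrri", matching the renamed EVEX register form.
  std::cout << buildOpcodeName("VINSERTPS", /*IsEVEX=*/true,
                               /*HasMemOperand=*/false, /*HasImm=*/true)
            << "\n";
}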
---
 .../X86/MCTargetDesc/X86InstComments.cpp      | 12 +++++------
 llvm/lib/Target/X86/X86InstrAVX512.td         |  4 ++--
 llvm/lib/Target/X86/X86InstrInfo.cpp          | 20 +++++++++----------
 llvm/lib/Target/X86/X86InstrSSE.td            |  4 ++--
 4 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp
index 9cc72d32d85f94..4f096460d3c7c7 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp
@@ -703,14 +703,14 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
     DestName = getRegName(MI->getOperand(0).getReg());
     break;
 
-  case X86::INSERTPSrr:
-  case X86::VINSERTPSrr:
-  case X86::VINSERTPSZrr:
+  case X86::INSERTPSrri:
+  case X86::VINSERTPSrri:
+  case X86::VINSERTPSZrri:
     Src2Name = getRegName(MI->getOperand(2).getReg());
     [[fallthrough]];
-  case X86::INSERTPSrm:
-  case X86::VINSERTPSrm:
-  case X86::VINSERTPSZrm:
+  case X86::INSERTPSrmi:
+  case X86::VINSERTPSrmi:
+  case X86::VINSERTPSZrmi:
     DestName = getRegName(MI->getOperand(0).getReg());
     Src1Name = getRegName(MI->getOperand(1).getReg());
     if (MI->getOperand(NumOperands - 1).isImm())
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index c9885242131238..bdbd7758d4ca70 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -649,12 +649,12 @@ defm : vinsert_for_mask_cast<"VINSERTI64x4Z", v32i8x_info, v64i8_info,
 // vinsertps - insert f32 to XMM
 let ExeDomain = SSEPackedSingle in {
 let isCommutable = 1 in
-def VINSERTPSZrr : AVX512AIi8<0x21, MRMSrcReg, (outs VR128X:$dst),
+def VINSERTPSZrri : AVX512AIi8<0x21, MRMSrcReg, (outs VR128X:$dst),
       (ins VR128X:$src1, VR128X:$src2, u8imm:$src3),
       "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
       [(set VR128X:$dst, (X86insertps VR128X:$src1, VR128X:$src2, timm:$src3))]>,
       EVEX, VVVV, Sched<[SchedWriteFShuffle.XMM]>;
-def VINSERTPSZrm: AVX512AIi8<0x21, MRMSrcMem, (outs VR128X:$dst),
+def VINSERTPSZrmi : AVX512AIi8<0x21, MRMSrcMem, (outs VR128X:$dst),
       (ins VR128X:$src1, f32mem:$src2, u8imm:$src3),
       "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
       [(set VR128X:$dst, (X86insertps VR128X:$src1,
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 401b8ce71edaf5..378eb00443ab6e 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -2424,9 +2424,9 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
     WorkingMI->getOperand(3).setImm(Mask ^ Imm);
     break;
   }
-  case X86::INSERTPSrr:
-  case X86::VINSERTPSrr:
-  case X86::VINSERTPSZrr: {
+  case X86::INSERTPSrri:
+  case X86::VINSERTPSrri:
+  case X86::VINSERTPSZrri: {
     unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
     unsigned ZMask = Imm & 15;
     unsigned DstIdx = (Imm >> 4) & 3;
@@ -7274,9 +7274,9 @@ MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
     ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
     unsigned Size, Align Alignment) const {
   switch (MI.getOpcode()) {
-  case X86::INSERTPSrr:
-  case X86::VINSERTPSrr:
-  case X86::VINSERTPSZrr:
+  case X86::INSERTPSrri:
+  case X86::VINSERTPSrri:
+  case X86::VINSERTPSZrri:
     // Attempt to convert the load of inserted vector into a fold load
     // of a single float.
     if (OpNum == 2) {
@@ -7289,13 +7289,13 @@ MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
       const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
       unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
       if ((Size == 0 || Size >= 16) && RCSize >= 16 &&
-          (MI.getOpcode() != X86::INSERTPSrr || Alignment >= Align(4))) {
+          (MI.getOpcode() != X86::INSERTPSrri || Alignment >= Align(4))) {
         int PtrOffset = SrcIdx * 4;
         unsigned NewImm = (DstIdx << 4) | ZMask;
         unsigned NewOpCode =
-            (MI.getOpcode() == X86::VINSERTPSZrr)  ? X86::VINSERTPSZrm
-            : (MI.getOpcode() == X86::VINSERTPSrr) ? X86::VINSERTPSrm
-                                                   : X86::INSERTPSrm;
+            (MI.getOpcode() == X86::VINSERTPSZrri)  ? X86::VINSERTPSZrmi
+            : (MI.getOpcode() == X86::VINSERTPSrri) ? X86::VINSERTPSrmi
+                                                    : X86::INSERTPSrmi;
         MachineInstr *NewMI =
             fuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, PtrOffset);
         NewMI->getOperand(NewMI->getNumOperands() - 1).setImm(NewImm);
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index 4e5f2e3f872ad4..6fc7876e1d7d28 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -5447,7 +5447,7 @@ let Constraints = "$src1 = $dst" in
 // in the target vector.
 multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
   let isCommutable = 1 in
-  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
+  def rri : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2, u8imm:$src3),
       !if(Is2Addr,
         !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
@@ -5456,7 +5456,7 @@ multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
       [(set VR128:$dst,
         (X86insertps VR128:$src1, VR128:$src2, timm:$src3))]>,
       Sched<[SchedWriteFShuffle.XMM]>;
-  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
+  def rmi : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, f32mem:$src2, u8imm:$src3),
       !if(Is2Addr,
         !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),

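For readers of the X86InstrInfo.cpp hunks above: the INSERTPS immediate packs a zero mask in bits [3:0], the destination element in bits [5:4], and the source element in bits [7:6], and the custom fold drops the source selector from the immediate while turning it into a byte offset on the folded load. A standalone sketch of that arithmetic (illustrative only, not part of the patch):

#include <cstdint>
#include <cstdio>

struct InsertPSFold {
  unsigned PtrOffset; // byte offset applied to the folded f32 load
  unsigned NewImm;    // immediate for the rmi form: dst element + zero mask
};

// Mirrors the computation shown in the foldMemoryOperandCustom hunk: the
// source element selector becomes a pointer offset of SrcIdx * 4 bytes.
InsertPSFold foldInsertPSImm(uint8_t Imm) {
  unsigned ZMask = Imm & 15;        // bits [3:0]
  unsigned DstIdx = (Imm >> 4) & 3; // bits [5:4]
  unsigned SrcIdx = (Imm >> 6) & 3; // bits [7:6]
  return {SrcIdx * 4, (DstIdx << 4) | ZMask};
}

int main() {
  InsertPSFold F = foldInsertPSImm(0x9C); // src=2, dst=1, zmask=0b1100
  std::printf("ptr offset = %u, new imm = 0x%02X\n", F.PtrOffset, F.NewImm);
}
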
>From 778f1066526f0eb9497f95e8d6724265109289fe Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Fri, 13 Sep 2024 18:05:50 +0100
Subject: [PATCH 2/2] Fix tests

---
 llvm/test/CodeGen/X86/evex-to-vex-compress.mir | 16 ++++++++--------
 llvm/utils/TableGen/X86ManualFoldTables.def    | 14 +++++++-------
 2 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/llvm/test/CodeGen/X86/evex-to-vex-compress.mir b/llvm/test/CodeGen/X86/evex-to-vex-compress.mir
index 13c9585873273a..9cf087000cb60e 100644
--- a/llvm/test/CodeGen/X86/evex-to-vex-compress.mir
+++ b/llvm/test/CodeGen/X86/evex-to-vex-compress.mir
@@ -2304,10 +2304,10 @@ body: |
   VEXTRACTPSZmr                                $rdi, 1, $noreg, 0, $noreg, $xmm0, 1
   ; CHECK: $eax = VEXTRACTPSrr                 $xmm0, 1
   $eax = VEXTRACTPSZrr                         $xmm0, 1
-  ; CHECK: $xmm0 = VINSERTPSrm                 $xmm0, $rdi, 1, $noreg, 0, $noreg, 1
-  $xmm0 = VINSERTPSZrm                         $xmm0, $rdi, 1, $noreg, 0, $noreg, 1
-  ; CHECK: $xmm0 = VINSERTPSrr                 $xmm0, $xmm0, 1
-  $xmm0 = VINSERTPSZrr                         $xmm0, $xmm0, 1
+  ; CHECK: $xmm0 = VINSERTPSrmi                $xmm0, $rdi, 1, $noreg, 0, $noreg, 1
+  $xmm0 = VINSERTPSZrmi                        $xmm0, $rdi, 1, $noreg, 0, $noreg, 1
+  ; CHECK: $xmm0 = VINSERTPSrri                $xmm0, $xmm0, 1
+  $xmm0 = VINSERTPSZrri                        $xmm0, $xmm0, 1
   ; CHECK: $xmm0 = VROUNDSDmi                  $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
   $xmm0 = VRNDSCALESDZm                        $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
   ; CHECK: $xmm0 = VROUNDSDri                  $xmm0, $xmm1, 15, implicit $mxcsr
@@ -4072,10 +4072,10 @@ body: |
   VEXTRACTPSZmr                                $rdi, 1, $noreg, 0, $noreg, $xmm16, 1
   ; CHECK: $eax = VEXTRACTPSZrr                $xmm16, 1
   $eax = VEXTRACTPSZrr                         $xmm16, 1
-  ; CHECK: $xmm16 = VINSERTPSZrm               $xmm16, $rdi, 1, $noreg, 0, $noreg, 1
-  $xmm16 = VINSERTPSZrm                        $xmm16, $rdi, 1, $noreg, 0, $noreg, 1
-  ; CHECK: $xmm16 = VINSERTPSZrr               $xmm16, $xmm16, 1
-  $xmm16 = VINSERTPSZrr                        $xmm16, $xmm16, 1
+  ; CHECK: $xmm16 = VINSERTPSZrmi              $xmm16, $rdi, 1, $noreg, 0, $noreg, 1
+  $xmm16 = VINSERTPSZrmi                       $xmm16, $rdi, 1, $noreg, 0, $noreg, 1
+  ; CHECK: $xmm16 = VINSERTPSZrri              $xmm16, $xmm16, 1
+  $xmm16 = VINSERTPSZrri                       $xmm16, $xmm16, 1
   ; CHECK: $xmm16 = VRNDSCALEPDZ128rmi         $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
   $xmm16 = VRNDSCALEPDZ128rmi                  $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
   ; CHECK: $xmm16 = VRNDSCALEPDZ128rri         $xmm16, 15, implicit $mxcsr
diff --git a/llvm/utils/TableGen/X86ManualFoldTables.def b/llvm/utils/TableGen/X86ManualFoldTables.def
index 2ebd92883c0719..c51bc9748d79b6 100644
--- a/llvm/utils/TableGen/X86ManualFoldTables.def
+++ b/llvm/utils/TableGen/X86ManualFoldTables.def
@@ -215,18 +215,18 @@ NOFOLD(UD1Wr)
 // Exclude these two b/c they would conflict with {MMX_MOVD64from64rr, MMX_MOVQ64mr} in unfolding table
 NOFOLD(MMX_MOVQ64rr)
 NOFOLD(MMX_MOVQ64rr_REV)
-// INSERTPSrm has no count_s while INSERTPSrr has count_s.
+// INSERTPSrmi has no count_s while INSERTPSrri has count_s.
 // count_s is to indicate which element in dst vector is inserted.
-// if count_s!=0, we can not fold INSERTPSrr into INSERTPSrm
+// if count_s!=0, we can not fold INSERTPSrri into INSERTPSrmi
 //
 // the following folding can happen when count_s==0
 // load xmm0, m32
-// insertpsrr xmm1, xmm0, imm
+// INSERTPSrri xmm1, xmm0, imm
 // =>
-// insertpsrm xmm1, m32, imm
-NOFOLD(INSERTPSrr)
-NOFOLD(VINSERTPSZrr)
-NOFOLD(VINSERTPSrr)
+// INSERTPSrmi xmm1, m32, imm
+NOFOLD(INSERTPSrri)
+NOFOLD(VINSERTPSZrri)
+NOFOLD(VINSERTPSrri)
 // Memory faults are suppressed for CFCMOV with memory operand.
 NOFOLD(CFCMOV16rr_REV)
 NOFOLD(CFCMOV32rr_REV)

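As a side note on the X86ManualFoldTables.def comment: the rmi form loads a single f32 and therefore carries no source-element selector, which is why the generic fold is only legal when count_s is zero. A standalone sketch of that check (illustrative only, not part of the patch):

#include <cstdint>

// Returns true if INSERTPSrri with this immediate may be folded verbatim to
// INSERTPSrmi, i.e. it already selects source element 0 (count_s == 0).
bool canFoldInsertPSGenerically(uint8_t Imm) {
  unsigned CountS = (Imm >> 6) & 3; // count_s lives in bits [7:6]
  return CountS == 0;
}

With count_s != 0, the fold is still reachable through the custom path in foldMemoryOperandCustom, which converts the load of the inserted vector into a single-float load at the appropriate offset.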