[llvm] [X86] matchAddressRecursively - move ZERO_EXTEND patterns into matchIndexRecursively (PR #85081)

via llvm-commits llvm-commits at lists.llvm.org
Sun Apr 28 21:44:26 PDT 2024


https://github.com/RicoAfoat updated https://github.com/llvm/llvm-project/pull/85081

>From f5de21173b2cf9c011fb01390309321d05401331 Mon Sep 17 00:00:00 2001
From: RicoAfoat <3094209316 at qq.com>
Date: Mon, 29 Apr 2024 12:36:59 +0800
Subject: [PATCH] [SelectionDAG] use HandleSDNode instead of SDValue  during
 SelectInlineAsmMemoryOperands

---
 .../CodeGen/SelectionDAG/SelectionDAGISel.cpp | 43 +++++++------
 llvm/test/CodeGen/X86/inline-asm-memop.ll     | 64 ++++++++++++++++---
 2 files changed, 80 insertions(+), 27 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index c78c3ed294e46b..84c257c16f389f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -2111,24 +2111,27 @@ bool SelectionDAGISel::CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
 /// by tblgen.  Others should not call it.
 void SelectionDAGISel::SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
                                                      const SDLoc &DL) {
-  std::vector<SDValue> InOps;
-  std::swap(InOps, Ops);
+  // Hold the operands as HandleSDNodes rather than SDValues, since X86
+  // address matching may call ReplaceAllUses and invalidate plain SDValues.
 
-  Ops.push_back(InOps[InlineAsm::Op_InputChain]); // 0
-  Ops.push_back(InOps[InlineAsm::Op_AsmString]);  // 1
-  Ops.push_back(InOps[InlineAsm::Op_MDNode]);     // 2, !srcloc
-  Ops.push_back(InOps[InlineAsm::Op_ExtraInfo]);  // 3 (SideEffect, AlignStack)
+  std::list<HandleSDNode> handles;
 
-  unsigned i = InlineAsm::Op_FirstOperand, e = InOps.size();
-  if (InOps[e-1].getValueType() == MVT::Glue)
+  handles.emplace_back(Ops[InlineAsm::Op_InputChain]); // 0
+  handles.emplace_back(Ops[InlineAsm::Op_AsmString]);  // 1
+  handles.emplace_back(Ops[InlineAsm::Op_MDNode]);     // 2, !srcloc
+  handles.emplace_back(
+      Ops[InlineAsm::Op_ExtraInfo]); // 3 (SideEffect, AlignStack)
+
+  unsigned i = InlineAsm::Op_FirstOperand, e = Ops.size();
+  if (Ops[e - 1].getValueType() == MVT::Glue)
     --e;  // Don't process a glue operand if it is here.
 
   while (i != e) {
-    InlineAsm::Flag Flags(InOps[i]->getAsZExtVal());
+    InlineAsm::Flag Flags(Ops[i]->getAsZExtVal());
     if (!Flags.isMemKind() && !Flags.isFuncKind()) {
       // Just skip over this operand, copying the operands verbatim.
-      Ops.insert(Ops.end(), InOps.begin() + i,
-                 InOps.begin() + i + Flags.getNumOperandRegisters() + 1);
+      handles.insert(handles.end(), Ops.begin() + i,
+                     Ops.begin() + i + Flags.getNumOperandRegisters() + 1);
       i += Flags.getNumOperandRegisters() + 1;
     } else {
       assert(Flags.getNumOperandRegisters() == 1 &&
@@ -2138,10 +2141,10 @@ void SelectionDAGISel::SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
       if (Flags.isUseOperandTiedToDef(TiedToOperand)) {
         // We need the constraint ID from the operand this is tied to.
         unsigned CurOp = InlineAsm::Op_FirstOperand;
-        Flags = InlineAsm::Flag(InOps[CurOp]->getAsZExtVal());
+        Flags = InlineAsm::Flag(Ops[CurOp]->getAsZExtVal());
         for (; TiedToOperand; --TiedToOperand) {
           CurOp += Flags.getNumOperandRegisters() + 1;
-          Flags = InlineAsm::Flag(InOps[CurOp]->getAsZExtVal());
+          Flags = InlineAsm::Flag(Ops[CurOp]->getAsZExtVal());
         }
       }
 
@@ -2149,7 +2152,7 @@ void SelectionDAGISel::SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
       std::vector<SDValue> SelOps;
       const InlineAsm::ConstraintCode ConstraintID =
           Flags.getMemoryConstraintID();
-      if (SelectInlineAsmMemoryOperand(InOps[i+1], ConstraintID, SelOps))
+      if (SelectInlineAsmMemoryOperand(Ops[i + 1], ConstraintID, SelOps))
         report_fatal_error("Could not match memory address.  Inline asm"
                            " failure!");
 
@@ -2158,15 +2161,19 @@ void SelectionDAGISel::SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
                                                 : InlineAsm::Kind::Func,
                               SelOps.size());
       Flags.setMemConstraint(ConstraintID);
-      Ops.push_back(CurDAG->getTargetConstant(Flags, DL, MVT::i32));
-      llvm::append_range(Ops, SelOps);
+      handles.emplace_back(CurDAG->getTargetConstant(Flags, DL, MVT::i32));
+      handles.insert(handles.end(), SelOps.begin(), SelOps.end());
       i += 2;
     }
   }
 
   // Add the glue input back if present.
-  if (e != InOps.size())
-    Ops.push_back(InOps.back());
+  if (e != Ops.size())
+    handles.emplace_back(Ops.back());
+
+  Ops.clear();
+  for (auto &i : handles)
+    Ops.push_back(i.getValue());
 }
 
 /// findGlueUse - Return use of MVT::Glue value produced by the specified
diff --git a/llvm/test/CodeGen/X86/inline-asm-memop.ll b/llvm/test/CodeGen/X86/inline-asm-memop.ll
index 83442498076102..5d9a40f93cea44 100644
--- a/llvm/test/CodeGen/X86/inline-asm-memop.ll
+++ b/llvm/test/CodeGen/X86/inline-asm-memop.ll
@@ -1,20 +1,47 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc -mtriple=x86_64-unknown-linux-gnu -O0 < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
 
 ; A bug in X86DAGToDAGISel::matchAddressRecursively creates a zext SDValue that
 ; is quickly replaced by another SDValue, but the stale value has already been
 ; pushed into the operand vector later passed to the getNode call in
 ; SelectionDAGISel::Select_INLINEASM; see issue 82431 for more information.
 
-define void @PR82431(i8 %call, ptr %b) {
-; CHECK-LABEL: PR82431:
+define i64 @PR82431_0(i8 %call, ptr %b) {
+; CHECK-LABEL: PR82431_0:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    movb %dil, %al
-; CHECK-NEXT:    addb $1, %al
-; CHECK-NEXT:    movzbl %al, %eax
-; CHECK-NEXT:    # kill: def $rax killed $eax
-; CHECK-NEXT:    shlq $3, %rax
-; CHECK-NEXT:    addq %rax, %rsi
+; CHECK-NEXT:    movzbl %dil, %eax
+; CHECK-NEXT:    movq 8(%rsi,%rax,8), %rax
+; CHECK-NEXT:    retq
+entry:
+  %narrow = add nuw i8 %call, 1
+  %idxprom = zext i8 %narrow to i64
+  %arrayidx = getelementptr [1 x i64], ptr %b, i64 0, i64 %idxprom
+  %ret_val = load i64, ptr %arrayidx
+  ret i64 %ret_val
+}
+
+define i32 @PR82431_1(i32 %0, ptr %f) {
+; CHECK-LABEL: PR82431_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $edi killed $edi def $rdi
+; CHECK-NEXT:    addl %edi, %edi
+; CHECK-NEXT:    andl $8, %edi
+; CHECK-NEXT:    movl 4(%rsi,%rdi), %eax
+; CHECK-NEXT:    retq
+entry:
+  %shr = lshr i32 %0, 1
+  %and = and i32 %shr, 2
+  %add = or i32 %and, 1
+  %idxprom = zext i32 %add to i64
+  %arrayidx = getelementptr [0 x i32], ptr %f, i64 0, i64 %idxprom
+  %ret_val = load i32, ptr %arrayidx
+  ret i32 %ret_val
+}
+
+define void @PR82431_2(i8 %call, ptr %b) {
+; CHECK-LABEL: PR82431_2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movzbl %dil, %eax
 ; CHECK-NEXT:    #APP
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    retq
@@ -25,3 +52,22 @@ entry:
   tail call void asm "", "=*m,*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i64) %arrayidx, ptr elementtype(i64) %arrayidx)
   ret void
 }
+
+define void @PR82431_3(i32 %0, ptr %f) {
+; CHECK-LABEL: PR82431_3:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $edi killed $edi def $rdi
+; CHECK-NEXT:    andl $4, %edi
+; CHECK-NEXT:    leaq (%rdi,%rdi), %rax
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    retq
+entry:
+  %shr = lshr i32 %0, 1
+  %and = and i32 %shr, 2
+  %add = or i32 %and, 1
+  %idxprom = zext i32 %add to i64
+  %arrayidx = getelementptr [0 x i32], ptr %f, i64 0, i64 %idxprom
+  tail call void asm "", "=*m,*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) %arrayidx, ptr elementtype(i32) %arrayidx)
+  ret void
+}

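For readers less familiar with the mechanism the patch relies on, here is a
minimal sketch (not part of the patch; DAG, Old and New are placeholder values)
of why a HandleSDNode survives ReplaceAllUses while a raw SDValue copy does
not: the handle is registered as a use of the wrapped node, so the DAG rewrites
it when the node is replaced, whereas an SDValue sitting in a plain container
is never updated and can end up pointing at a deleted node.

  // Illustrative sketch only, not part of the patch above.
  #include "llvm/CodeGen/SelectionDAG.h"
  #include "llvm/CodeGen/SelectionDAGNodes.h"

  using namespace llvm;

  // Assume DAG is a live SelectionDAG and Old/New are values inside it.
  static SDValue trackAcrossRAUW(SelectionDAG &DAG, SDValue Old, SDValue New) {
    SDValue Stale = Old;      // plain copy: not a use, never updated by RAUW
    HandleSDNode Handle(Old); // handle: registered as a use of Old's node

    // Something like X86's address matching may do this internally.
    DAG.ReplaceAllUsesWith(Old, New);

    // Handle.getValue() now refers to New; Stale may still point at a node
    // the DAG has since deleted.
    (void)Stale;
    return Handle.getValue();
  }

This is why SelectInlineAsmMemoryOperands above collects its pending operands
in a std::list<HandleSDNode> and only rebuilds the Ops vector once every memory
operand has been selected. A std::list (rather than std::vector) keeps each
handle at a stable address, which matters because the DAG's use lists point
back into the handle nodes.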

