[llvm] [X86][ConstraintFP] Model rounding control registers for inline asm (PR #92846)

Phoebe Wang via llvm-commits llvm-commits at lists.llvm.org
Mon May 20 20:24:54 PDT 2024


https://github.com/phoebewang created https://github.com/llvm/llvm-project/pull/92846

We have an internal test affected by https://github.com/llvm/llvm-project/commit/a69673615bb9f14794056470a32f70f60a52213d. The reason is that we haven't modeled the rounding control registers for inline asm under constrained FP.
Here is a reduced case: https://godbolt.org/z/s4EsKP94e
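
For reference, here is a minimal sketch of the kind of source that hits the problem. It is a reconstruction in the spirit of the godbolt reduction and the new test below, not the internal test itself; the function and parameter names are made up, and "up"/"down" are assumed to hold MXCSR images whose rounding-control bits differ. Build with something like clang -O2 -ffp-model=strict:

    // Each division must observe the rounding mode installed by the
    // preceding inline asm, so the compiler must not hoist, sink, or
    // CSE the divisions across the asm statements. Without FPCW/MXCSR
    // modeled as implicitly defined by the asm, the two divisions look
    // identical and can be merged.
    double diff_round(double x, unsigned up, unsigned down) {
      double a, b;
      __asm__ volatile("ldmxcsr %0" : : "m"(up));   // e.g. round toward +inf
      a = 1.0 / x;
      __asm__ volatile("ldmxcsr %0" : : "m"(down)); // e.g. round toward -inf
      b = 1.0 / x;
      return a - b; // nonzero whenever 1.0 / x is inexact
    }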

From ac3b2ad1e76c9d9accd83dbc7c60ddc2062e50ee Mon Sep 17 00:00:00 2001
From: Phoebe Wang <phoebe.wang at intel.com>
Date: Tue, 21 May 2024 11:18:35 +0800
Subject: [PATCH] [X86][ConstraintFP] Model rounding control registers for
 inline asm

We have an internal test affected by https://github.com/llvm/llvm-project/commit/a69673615bb9f14794056470a32f70f60a52213d.
The reason is that we haven't modeled the rounding control registers for
inline asm under constrained FP.
Here is a reduced case: https://godbolt.org/z/s4EsKP94e
---
 llvm/include/llvm/CodeGen/TargetLowering.h    |  3 +-
 .../lib/CodeGen/SelectionDAG/InstrEmitter.cpp |  7 ++
 .../Target/AArch64/AArch64ISelLowering.cpp    |  3 +-
 llvm/lib/Target/AArch64/AArch64ISelLowering.h |  3 +-
 llvm/lib/Target/X86/X86ISelLowering.h         |  3 +-
 llvm/lib/Target/X86/X86ISelLoweringCall.cpp   |  8 ++-
 llvm/test/CodeGen/X86/pr59305.ll              | 69 ++++++++++++++++++-
 7 files changed, 89 insertions(+), 7 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 50a8c7eb75af5..da71121a56a38 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -4817,7 +4817,8 @@ class TargetLowering : public TargetLoweringBase {
 
   /// Returns a 0 terminated array of rounding control registers that can be
   /// attached into strict FP call.
-  virtual ArrayRef<MCPhysReg> getRoundingControlRegisters() const {
+  virtual ArrayRef<MCPhysReg>
+  getRoundingControlRegisters(const char *AsmStr = nullptr) const {
     return ArrayRef<MCPhysReg>();
   }
 
diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 759368a67a16c..709e7e0ebb022 100644
--- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -1412,6 +1412,13 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
       }
     }
 
+    // Add rounding control registers as implicit def for inline asm.
+    if (MF->getFunction().hasFnAttribute(Attribute::StrictFP)) {
+      ArrayRef<MCPhysReg> RCRegs = TLI->getRoundingControlRegisters(AsmStr);
+      for (MCPhysReg Reg : RCRegs)
+        MIB.addReg(Reg, RegState::ImplicitDefine);
+    }
+
     // GCC inline assembly allows input operands to also be early-clobber
     // output operands (so long as the operand is written only after it's
     // used), but this does not match the semantics of our early-clobber flag.
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index e31a27e9428e8..b7ae1d670513c 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -16867,7 +16867,8 @@ AArch64TargetLowering::getScratchRegisters(CallingConv::ID) const {
   return ScratchRegs;
 }
 
-ArrayRef<MCPhysReg> AArch64TargetLowering::getRoundingControlRegisters() const {
+ArrayRef<MCPhysReg>
+AArch64TargetLowering::getRoundingControlRegisters(const char *AsmStr) const {
   static const MCPhysReg RCRegs[] = {AArch64::FPCR};
   return RCRegs;
 }
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index a44a3d35d2f9c..56c8cc2556dc9 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -726,7 +726,8 @@ class AArch64TargetLowering : public TargetLowering {
                                      CodeGenOptLevel OptLevel) const override;
 
   const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
-  ArrayRef<MCPhysReg> getRoundingControlRegisters() const override;
+  ArrayRef<MCPhysReg>
+  getRoundingControlRegisters(const char *AsmStr) const override;
 
   /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
   bool isDesirableToCommuteWithShift(const SDNode *N,
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index ade54f73bff09..8a5a99f93ffd6 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1740,7 +1740,8 @@ namespace llvm {
                         LLVMContext &Context) const override;
 
     const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
-    ArrayRef<MCPhysReg> getRoundingControlRegisters() const override;
+    ArrayRef<MCPhysReg>
+    getRoundingControlRegisters(const char *AsmStr) const override;
 
     TargetLoweringBase::AtomicExpansionKind
     shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
index b107d56f8cf9b..d6600e0cadd59 100644
--- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
+++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
@@ -669,8 +669,14 @@ const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
   return ScratchRegs;
 }
 
-ArrayRef<MCPhysReg> X86TargetLowering::getRoundingControlRegisters() const {
+ArrayRef<MCPhysReg>
+X86TargetLowering::getRoundingControlRegisters(const char *AsmStr) const {
   static const MCPhysReg RCRegs[] = {X86::FPCW, X86::MXCSR};
+  if (AsmStr) {
+    StringRef S(AsmStr);
+    if (!S.contains("ldmxcsr") && !S.contains("fldcw"))
+      return ArrayRef<MCPhysReg>();
+  }
   return RCRegs;
 }
 
diff --git a/llvm/test/CodeGen/X86/pr59305.ll b/llvm/test/CodeGen/X86/pr59305.ll
index 4d59192fdc4d3..51fae4aad0286 100644
--- a/llvm/test/CodeGen/X86/pr59305.ll
+++ b/llvm/test/CodeGen/X86/pr59305.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-pc-linux < %s | FileCheck %s --check-prefix=X64
-; RUN: llc -mtriple=i686-pc-linux < %s | FileCheck %s --check-prefix=X86
+; RUN: sed -e "s/SETROUND/ldmxcsr/g" %s | llc -mtriple=x86_64-pc-linux | FileCheck %s --check-prefix=X64
+; RUN: sed -e "s/SETROUND/fldcw/g" %s | llc -mtriple=i686-pc-linux | FileCheck %s --check-prefix=X86
 
 define double @foo(double %0) #0 {
 ; X64-LABEL: foo:
@@ -74,6 +74,71 @@ define double @foo(double %0) #0 {
     ret double %8
 }
 
+define double @bar(double %0) #0 {
+; X64-LABEL: bar:
+; X64:       # %bb.0:
+; X64-NEXT:    pushq %rax
+; X64-NEXT:    #APP
+; X64-NEXT:    ldmxcsr 0
+; X64-NEXT:    #NO_APP
+; X64-NEXT:    wait
+; X64-NEXT:    movsd {{.*#+}} xmm2 = [1.0E+0,0.0E+0]
+; X64-NEXT:    movapd %xmm2, %xmm3
+; X64-NEXT:    divsd %xmm0, %xmm3
+; X64-NEXT:    #APP
+; X64-NEXT:    ldmxcsr 0
+; X64-NEXT:    #NO_APP
+; X64-NEXT:    wait
+; X64-NEXT:    movapd %xmm2, %xmm1
+; X64-NEXT:    divsd %xmm0, %xmm1
+; X64-NEXT:    #APP
+; X64-NEXT:    ldmxcsr 0
+; X64-NEXT:    #NO_APP
+; X64-NEXT:    wait
+; X64-NEXT:    divsd %xmm0, %xmm2
+; X64-NEXT:    movapd %xmm3, %xmm0
+; X64-NEXT:    callq fma at PLT
+; X64-NEXT:    popq %rax
+; X64-NEXT:    retq
+;
+; X86-LABEL: bar:
+; X86:       # %bb.0:
+; X86-NEXT:    subl $28, %esp
+; X86-NEXT:    fldl {{[0-9]+}}(%esp)
+; X86-NEXT:    #APP
+; X86-NEXT:    fldcw 0
+; X86-NEXT:    #NO_APP
+; X86-NEXT:    fld1
+; X86-NEXT:    fld %st(0)
+; X86-NEXT:    fdiv %st(2), %st
+; X86-NEXT:    #APP
+; X86-NEXT:    fldcw 0
+; X86-NEXT:    #NO_APP
+; X86-NEXT:    fld %st(1)
+; X86-NEXT:    fdiv %st(3), %st
+; X86-NEXT:    #APP
+; X86-NEXT:    fldcw 0
+; X86-NEXT:    #NO_APP
+; X86-NEXT:    fxch %st(2)
+; X86-NEXT:    fdivp %st, %st(3)
+; X86-NEXT:    fxch %st(2)
+; X86-NEXT:    fstpl {{[0-9]+}}(%esp)
+; X86-NEXT:    fstpl {{[0-9]+}}(%esp)
+; X86-NEXT:    fstpl (%esp)
+; X86-NEXT:    wait
+; X86-NEXT:    calll fma
+; X86-NEXT:    addl $28, %esp
+; X86-NEXT:    retl
+    call void asm sideeffect "SETROUND $0", "*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) null)
+    %2 = call double @llvm.experimental.constrained.fdiv.f64(double 1.000000e+00, double %0, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
+    call void asm sideeffect "SETROUND $0", "*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) null)
+    %3 = call double @llvm.experimental.constrained.fdiv.f64(double 1.000000e+00, double %0, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
+    call void asm sideeffect "SETROUND $0", "*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) null)
+    %4 = call double @llvm.experimental.constrained.fdiv.f64(double 1.000000e+00, double %0, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
+    %5 = call double @llvm.experimental.constrained.fma.f64(double %2, double %3, double %4, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
+    ret double %5
+}
+
 declare i32 @fesetround(i32) #0
 declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata) #0
 declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata) #0
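
As a side note on the X86ISelLoweringCall.cpp hunk: the filtering is a plain substring scan over the raw asm text. Below is a standalone sketch of that logic, hand-copied so it can be tried outside of LLVM (std::string_view stands in for StringRef; the authoritative code is the hunk above):

    #include <cstdio>
    #include <string_view>

    // Mirrors the patch's heuristic: report the rounding control
    // registers as clobbered only when the asm text mentions an
    // instruction that can rewrite them. A null string, which the
    // pre-existing call-lowering path passes via the default argument,
    // keeps the conservative answer.
    static bool clobbersRoundingControl(const char *AsmStr) {
      if (!AsmStr)
        return true;
      std::string_view S(AsmStr);
      return S.find("ldmxcsr") != std::string_view::npos ||
             S.find("fldcw") != std::string_view::npos;
    }

    int main() {
      std::printf("%d\n", clobbersRoundingControl("ldmxcsr $0")); // 1
      std::printf("%d\n", clobbersRoundingControl("nop"));        // 0
      std::printf("%d\n", clobbersRoundingControl(nullptr));      // 1
    }

Note the scan is purely textual, so an asm string that spells the mnemonic differently (or clobbers MXCSR some other way) would not be caught, while the nullptr default keeps non-asm call sites conservative.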


