[llvm] a0e6f83 - [ConstraintFP] Model rounding control registers for inline asm (#92846)

via llvm-commits llvm-commits at lists.llvm.org
Thu May 23 07:50:43 PDT 2024


Author: Phoebe Wang
Date: 2024-05-23T22:50:39+08:00
New Revision: a0e6f83781bde8474fcf772595fc3da79b3fabc4

URL: https://github.com/llvm/llvm-project/commit/a0e6f83781bde8474fcf772595fc3da79b3fabc4
DIFF: https://github.com/llvm/llvm-project/commit/a0e6f83781bde8474fcf772595fc3da79b3fabc4.diff

LOG: [ConstraintFP] Model rounding control registers for inline asm (#92846)

We have an internal test affected by
https://github.com/llvm/llvm-project/commit/a69673615bb9f14794056470a32f70f60a52213d.
The reason is that we had not modeled the rounding control registers as
implicit defs of inline asm under constrained FP.
Here is a reduced case: https://godbolt.org/z/s4EsKP94e
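
For context, here is a minimal C-level sketch of the affected pattern
(illustrative only; not the exact reduced case behind the link). Under strict
FP (e.g. clang's -ffp-model=strict), each division must honor the rounding
mode installed by the preceding inline asm, so the three divisions are not
redundant; without an implicit def of the rounding control registers on the
asm, the backend was free to fold them into one.

    // Illustrative sketch only -- it mirrors the bar() test added below in
    // llvm/test/CodeGen/X86/pr59305.ll. RoundingWord is a hypothetical MXCSR
    // image; the point is that each asm statement may change the rounding mode.
    static unsigned RoundingWord = 0x1f80;

    double bar(double x) {
      double a, b, c;
      asm volatile("ldmxcsr %0" : : "m"(RoundingWord));
      a = 1.0 / x;
      asm volatile("ldmxcsr %0" : : "m"(RoundingWord));
      b = 1.0 / x;
      asm volatile("ldmxcsr %0" : : "m"(RoundingWord));
      c = 1.0 / x;
      return __builtin_fma(a, b, c);
    }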

Added: 
    

Modified: 
    llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
    llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
    llvm/test/CodeGen/AMDGPU/call-defs-mode-register.ll
    llvm/test/CodeGen/X86/pr59305.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp b/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
index 14e1e1fdf01de..5acf35b37882e 100644
--- a/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
@@ -538,6 +538,13 @@ bool InlineAsmLowering::lowerInlineAsm(
     }
   }
 
+  // Add rounding control registers as implicit def for inline asm.
+  if (MF.getFunction().hasFnAttribute(Attribute::StrictFP)) {
+    ArrayRef<MCPhysReg> RCRegs = TLI->getRoundingControlRegisters();
+    for (MCPhysReg Reg : RCRegs)
+      Inst.addReg(Reg, RegState::ImplicitDefine);
+  }
+
   if (auto Bundle = Call.getOperandBundle(LLVMContext::OB_convergencectrl)) {
     auto *Token = Bundle->Inputs[0].get();
     ArrayRef<Register> SourceRegs = GetOrCreateVRegs(*Token);

diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 759368a67a16c..36738961382ef 100644
--- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -1412,6 +1412,13 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
       }
     }
 
+    // Add rounding control registers as implicit def for inline asm.
+    if (MF->getFunction().hasFnAttribute(Attribute::StrictFP)) {
+      ArrayRef<MCPhysReg> RCRegs = TLI->getRoundingControlRegisters();
+      for (MCPhysReg Reg : RCRegs)
+        MIB.addReg(Reg, RegState::ImplicitDefine);
+    }
+
     // GCC inline assembly allows input operands to also be early-clobber
     // output operands (so long as the operand is written only after it's
     // used), but this does not match the semantics of our early-clobber flag.
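
Both hunks above consult the same TargetLowering::getRoundingControlRegisters()
hook and add every returned register as an implicit def of the INLINEASM
instruction. For reference, a hedged sketch of the target side (the target and
register names are hypothetical, not an in-tree backend; real targets report
their own rounding-control registers, e.g. the AMDGPU mode register and the
X86 SSE/x87 rounding controls exercised by the test updates below):

    // Hypothetical backend override -- illustrative only. It reports which
    // physical registers select the FP rounding mode so that the strict-FP
    // inline asm lowering above can mark them as implicit defs.
    ArrayRef<MCPhysReg> MyTargetLowering::getRoundingControlRegisters() const {
      static const MCPhysReg RCRegs[] = {MyTarget::FPCR};
      return RCRegs;
    }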

diff --git a/llvm/test/CodeGen/AMDGPU/call-defs-mode-register.ll b/llvm/test/CodeGen/AMDGPU/call-defs-mode-register.ll
index 7ad24302d7834..0c4974f347a8f 100644
--- a/llvm/test/CodeGen/AMDGPU/call-defs-mode-register.ll
+++ b/llvm/test/CodeGen/AMDGPU/call-defs-mode-register.ll
@@ -43,7 +43,7 @@ define float @asm_changes_mode(float %x, float %y) #0 {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-  ; CHECK-NEXT:   INLINEASM &"; maybe defs mode", 1 /* sideeffect attdialect */
+  ; CHECK-NEXT:   INLINEASM &"; maybe defs mode", 1 /* sideeffect attdialect */, implicit-def $mode
   ; CHECK-NEXT:   [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
   ; CHECK-NEXT:   $vgpr0 = COPY [[V_ADD_F32_e64_]]
   ; CHECK-NEXT:   SI_RETURN implicit $vgpr0

diff --git a/llvm/test/CodeGen/X86/pr59305.ll b/llvm/test/CodeGen/X86/pr59305.ll
index 4d59192fdc4d3..46c9da5a51939 100644
--- a/llvm/test/CodeGen/X86/pr59305.ll
+++ b/llvm/test/CodeGen/X86/pr59305.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-pc-linux < %s | FileCheck %s --check-prefix=X64
-; RUN: llc -mtriple=i686-pc-linux < %s | FileCheck %s --check-prefix=X86
+; RUN: sed -e "s/SETROUND/ldmxcsr/g" %s | llc -mtriple=x86_64-pc-linux - | FileCheck %s --check-prefix=X64
+; RUN: sed -e "s/SETROUND/fldcw/g" %s | llc -mtriple=i686-pc-linux - | FileCheck %s --check-prefix=X86
 
 define double @foo(double %0) #0 {
 ; X64-LABEL: foo:
@@ -74,6 +74,71 @@ define double @foo(double %0) #0 {
     ret double %8
 }
 
+define double @bar(double %0) #0 {
+; X64-LABEL: bar:
+; X64:       # %bb.0:
+; X64-NEXT:    pushq %rax
+; X64-NEXT:    #APP
+; X64-NEXT:    ldmxcsr 0
+; X64-NEXT:    #NO_APP
+; X64-NEXT:    wait
+; X64-NEXT:    movsd {{.*#+}} xmm2 = [1.0E+0,0.0E+0]
+; X64-NEXT:    movapd %xmm2, %xmm3
+; X64-NEXT:    divsd %xmm0, %xmm3
+; X64-NEXT:    #APP
+; X64-NEXT:    ldmxcsr 0
+; X64-NEXT:    #NO_APP
+; X64-NEXT:    wait
+; X64-NEXT:    movapd %xmm2, %xmm1
+; X64-NEXT:    divsd %xmm0, %xmm1
+; X64-NEXT:    #APP
+; X64-NEXT:    ldmxcsr 0
+; X64-NEXT:    #NO_APP
+; X64-NEXT:    wait
+; X64-NEXT:    divsd %xmm0, %xmm2
+; X64-NEXT:    movapd %xmm3, %xmm0
+; X64-NEXT:    callq fma at PLT
+; X64-NEXT:    popq %rax
+; X64-NEXT:    retq
+;
+; X86-LABEL: bar:
+; X86:       # %bb.0:
+; X86-NEXT:    subl $28, %esp
+; X86-NEXT:    fldl {{[0-9]+}}(%esp)
+; X86-NEXT:    #APP
+; X86-NEXT:    fldcw 0
+; X86-NEXT:    #NO_APP
+; X86-NEXT:    fld1
+; X86-NEXT:    fld %st(0)
+; X86-NEXT:    fdiv %st(2), %st
+; X86-NEXT:    #APP
+; X86-NEXT:    fldcw 0
+; X86-NEXT:    #NO_APP
+; X86-NEXT:    fld %st(1)
+; X86-NEXT:    fdiv %st(3), %st
+; X86-NEXT:    #APP
+; X86-NEXT:    fldcw 0
+; X86-NEXT:    #NO_APP
+; X86-NEXT:    fxch %st(2)
+; X86-NEXT:    fdivp %st, %st(3)
+; X86-NEXT:    fxch %st(2)
+; X86-NEXT:    fstpl {{[0-9]+}}(%esp)
+; X86-NEXT:    fstpl {{[0-9]+}}(%esp)
+; X86-NEXT:    fstpl (%esp)
+; X86-NEXT:    wait
+; X86-NEXT:    calll fma
+; X86-NEXT:    addl $28, %esp
+; X86-NEXT:    retl
+    call void asm sideeffect "SETROUND $0", "*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) null)
+    %2 = call double @llvm.experimental.constrained.fdiv.f64(double 1.000000e+00, double %0, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
+    call void asm sideeffect "SETROUND $0", "*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) null)
+    %3 = call double @llvm.experimental.constrained.fdiv.f64(double 1.000000e+00, double %0, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
+    call void asm sideeffect "SETROUND $0", "*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) null)
+    %4 = call double @llvm.experimental.constrained.fdiv.f64(double 1.000000e+00, double %0, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
+    %5 = call double @llvm.experimental.constrained.fma.f64(double %2, double %3, double %4, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
+    ret double %5
+}
+
 declare i32 @fesetround(i32) #0
 declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata) #0
 declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata) #0


        

