[llvm] 6dd8834 - X86/GlobalISel: Rely on default assignValueToReg

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Tue May 4 13:36:42 PDT 2021


Author: Matt Arsenault
Date: 2021-05-04T16:36:37-04:00
New Revision: 6dd883477249dae1c57e07f6c4375eb90c2df297

URL: https://github.com/llvm/llvm-project/commit/6dd883477249dae1c57e07f6c4375eb90c2df297
DIFF: https://github.com/llvm/llvm-project/commit/6dd883477249dae1c57e07f6c4375eb90c2df297.diff

LOG: X86/GlobalISel: Rely on default assignValueToReg

The resulting output is semantically closer to what the DAG emits and
is more compatible with the existing CCAssignFns.

Returning f32 in f80 is still clearly broken, but it was equally broken
before, when G_ANYEXT was used to go from f32 to f80.
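
For context, the default CallLowering handlers this change defers to
behave roughly as sketched below. This is a simplified paraphrase, not
the verbatim CallLowering.cpp source: the outgoing side extends the
value to its location type per the CCValAssign hint before copying into
the physical register, and the incoming side copies at the location
type and truncates back down when the location is wider.

    // Outgoing (sketch): extend per the SExt/ZExt/AExt hint, then copy out.
    void assignValueToReg(Register ValVReg, Register PhysReg,
                          CCValAssign &VA) {
      Register ExtReg = extendRegister(ValVReg, VA);
      MIRBuilder.buildCopy(PhysReg, ExtReg);
    }

    // Incoming (sketch): copy in at the location type, truncate if needed.
    void assignValueToReg(Register ValVReg, Register PhysReg,
                          CCValAssign &VA) {
      LLT LocTy(VA.getLocVT());
      if (LocTy.getSizeInBits() == MRI.getType(ValVReg).getSizeInBits()) {
        MIRBuilder.buildCopy(ValVReg, PhysReg);
        return;
      }
      auto Copy = MIRBuilder.buildCopy(LocTy, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
    }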

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86CallLowering.cpp
    llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
    llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll
    llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86CallLowering.cpp b/llvm/lib/Target/X86/X86CallLowering.cpp
index bebc654ae741..d2fd1d3ef6ce 100644
--- a/llvm/lib/Target/X86/X86CallLowering.cpp
+++ b/llvm/lib/Target/X86/X86CallLowering.cpp
@@ -123,26 +123,7 @@ struct X86OutgoingValueHandler : public CallLowering::OutgoingValueHandler {
   void assignValueToReg(Register ValVReg, Register PhysReg,
                         CCValAssign &VA) override {
     MIB.addUse(PhysReg, RegState::Implicit);
-
-    Register ExtReg;
-    // If we are copying the value to a physical register with the
-    // size larger than the size of the value itself - build AnyExt
-    // to the size of the register first and only then do the copy.
-    // The example of that would be copying from s32 to xmm0, for which
-    // case ValVT == LocVT == MVT::f32. If LocSize and ValSize are not equal
-    // we expect normal extendRegister mechanism to work.
-    unsigned PhysRegSize =
-        MRI.getTargetRegisterInfo()->getRegSizeInBits(PhysReg, MRI);
-    unsigned ValSize = VA.getValVT().getSizeInBits();
-    unsigned LocSize = VA.getLocVT().getSizeInBits();
-    if (PhysRegSize > ValSize && LocSize == ValSize) {
-      assert((PhysRegSize == 128 || PhysRegSize == 80) &&
-             "We expect that to be 128 bit");
-      ExtReg =
-          MIRBuilder.buildAnyExt(LLT::scalar(PhysRegSize), ValVReg).getReg(0);
-    } else
-      ExtReg = extendRegister(ValVReg, VA);
-
+    Register ExtReg = extendRegister(ValVReg, VA);
     MIRBuilder.buildCopy(PhysReg, ExtReg);
   }
 
@@ -264,36 +245,7 @@ struct X86IncomingValueHandler : public CallLowering::IncomingValueHandler {
   void assignValueToReg(Register ValVReg, Register PhysReg,
                         CCValAssign &VA) override {
     markPhysRegUsed(PhysReg);
-
-    switch (VA.getLocInfo()) {
-    default: {
-      // If we are copying the value from a physical register with the
-      // size larger than the size of the value itself - build the copy
-      // of the phys reg first and then build the truncation of that copy.
-      // The example of that would be copying from xmm0 to s32, for which
-      // case ValVT == LocVT == MVT::f32. If LocSize and ValSize are not equal
-      // we expect this to be handled in SExt/ZExt/AExt case.
-      unsigned PhysRegSize =
-          MRI.getTargetRegisterInfo()->getRegSizeInBits(PhysReg, MRI);
-      unsigned ValSize = VA.getValVT().getSizeInBits();
-      unsigned LocSize = VA.getLocVT().getSizeInBits();
-      if (PhysRegSize > ValSize && LocSize == ValSize) {
-        auto Copy = MIRBuilder.buildCopy(LLT::scalar(PhysRegSize), PhysReg);
-        MIRBuilder.buildTrunc(ValVReg, Copy);
-        return;
-      }
-
-      MIRBuilder.buildCopy(ValVReg, PhysReg);
-      break;
-    }
-    case CCValAssign::LocInfo::SExt:
-    case CCValAssign::LocInfo::ZExt:
-    case CCValAssign::LocInfo::AExt: {
-      auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
-      MIRBuilder.buildTrunc(ValVReg, Copy);
-      break;
-    }
-    }
+    IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
   }
 
   /// How the physical register gets marked varies between formal

diff --git a/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll b/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
index a4829917c324..0ee0bd0237cc 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
@@ -219,18 +219,14 @@ define float @test_float_args(float %arg1, float %arg2) {
   ; X86:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 16)
   ; X86:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
   ; X86:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0)
-  ; X86:   [[ANYEXT:%[0-9]+]]:_(s80) = G_ANYEXT [[LOAD1]](s32)
-  ; X86:   $fp0 = COPY [[ANYEXT]](s80)
+  ; X86:   $fp0 = COPY [[LOAD1]](s32)
   ; X86:   RET 0, implicit $fp0
   ; X64-LABEL: name: test_float_args
   ; X64: bb.1 (%ir-block.0):
   ; X64:   liveins: $xmm0, $xmm1
-  ; X64:   [[COPY:%[0-9]+]]:_(s128) = COPY $xmm0
-  ; X64:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s128)
-  ; X64:   [[COPY1:%[0-9]+]]:_(s128) = COPY $xmm1
-  ; X64:   [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s128)
-  ; X64:   [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[TRUNC1]](s32)
-  ; X64:   $xmm0 = COPY [[ANYEXT]](s128)
+  ; X64:   [[COPY:%[0-9]+]]:_(s32) = COPY $xmm0
+  ; X64:   [[COPY1:%[0-9]+]]:_(s32) = COPY $xmm1
+  ; X64:   $xmm0 = COPY [[COPY1]](s32)
   ; X64:   RET 0, implicit $xmm0
   ret float %arg2
 }
@@ -242,18 +238,14 @@ define double @test_double_args(double %arg1, double %arg2) {
   ; X86:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 8 from %fixed-stack.1, align 16)
   ; X86:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
   ; X86:   [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 8 from %fixed-stack.0)
-  ; X86:   [[ANYEXT:%[0-9]+]]:_(s80) = G_ANYEXT [[LOAD1]](s64)
-  ; X86:   $fp0 = COPY [[ANYEXT]](s80)
+  ; X86:   $fp0 = COPY [[LOAD1]](s64)
   ; X86:   RET 0, implicit $fp0
   ; X64-LABEL: name: test_double_args
   ; X64: bb.1 (%ir-block.0):
   ; X64:   liveins: $xmm0, $xmm1
-  ; X64:   [[COPY:%[0-9]+]]:_(s128) = COPY $xmm0
-  ; X64:   [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[COPY]](s128)
-  ; X64:   [[COPY1:%[0-9]+]]:_(s128) = COPY $xmm1
-  ; X64:   [[TRUNC1:%[0-9]+]]:_(s64) = G_TRUNC [[COPY1]](s128)
-  ; X64:   [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[TRUNC1]](s64)
-  ; X64:   $xmm0 = COPY [[ANYEXT]](s128)
+  ; X64:   [[COPY:%[0-9]+]]:_(s64) = COPY $xmm0
+  ; X64:   [[COPY1:%[0-9]+]]:_(s64) = COPY $xmm1
+  ; X64:   $xmm0 = COPY [[COPY1]](s64)
   ; X64:   RET 0, implicit $xmm0
   ret double %arg2
 }
@@ -686,8 +678,7 @@ define void @test_variadic_call_2(i8** %addr_ptr, double* %val_ptr) {
   ; X64:   [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[COPY1]](p0) :: (load 8 from %ir.val_ptr)
   ; X64:   ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
   ; X64:   $rdi = COPY [[LOAD]](p0)
-  ; X64:   [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[LOAD1]](s64)
-  ; X64:   $xmm0 = COPY [[ANYEXT]](s128)
+  ; X64:   $xmm0 = COPY [[LOAD1]](s64)
   ; X64:   $al = MOV8ri 1
   ; X64:   CALL64pcrel32 @variadic_callee, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $xmm0, implicit $al
   ; X64:   ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp

diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll b/llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll
index b33daf570d45..b6758c8cca0e 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll
@@ -13,16 +13,14 @@ define float @test_return_f1(float %f.coerce) {
   ; ALL-LABEL: name: test_return_f1
   ; ALL: bb.1.entry:
   ; ALL:   liveins: $xmm0
-  ; ALL:   [[COPY:%[0-9]+]]:_(s128) = COPY $xmm0
-  ; ALL:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s128)
+  ; ALL:   [[COPY:%[0-9]+]]:_(s32) = COPY $xmm0
   ; ALL:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
   ; ALL:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
   ; ALL:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.f
-  ; ALL:   G_STORE [[TRUNC]](s32), [[FRAME_INDEX1]](p0) :: (store 4 into %ir.coerce.dive2)
+  ; ALL:   G_STORE [[COPY]](s32), [[FRAME_INDEX1]](p0) :: (store 4 into %ir.coerce.dive2)
   ; ALL:   G_MEMCPY [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
   ; ALL:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load 4 from %ir.coerce.dive13)
-  ; ALL:   [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[LOAD]](s32)
-  ; ALL:   $xmm0 = COPY [[ANYEXT]](s128)
+  ; ALL:   $xmm0 = COPY [[LOAD]](s32)
   ; ALL:   RET 0, implicit $xmm0
 entry:
   %retval = alloca %struct.f1, align 4
@@ -43,16 +41,14 @@ define double @test_return_d1(double %d.coerce) {
   ; ALL-LABEL: name: test_return_d1
   ; ALL: bb.1.entry:
   ; ALL:   liveins: $xmm0
-  ; ALL:   [[COPY:%[0-9]+]]:_(s128) = COPY $xmm0
-  ; ALL:   [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[COPY]](s128)
+  ; ALL:   [[COPY:%[0-9]+]]:_(s64) = COPY $xmm0
   ; ALL:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
   ; ALL:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
   ; ALL:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.d
-  ; ALL:   G_STORE [[TRUNC]](s64), [[FRAME_INDEX1]](p0) :: (store 8 into %ir.coerce.dive2)
+  ; ALL:   G_STORE [[COPY]](s64), [[FRAME_INDEX1]](p0) :: (store 8 into %ir.coerce.dive2)
   ; ALL:   G_MEMCPY [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store 1 into %ir.0, align 8), (load 1 from %ir.1, align 8)
   ; ALL:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load 8 from %ir.coerce.dive13)
-  ; ALL:   [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[LOAD]](s64)
-  ; ALL:   $xmm0 = COPY [[ANYEXT]](s128)
+  ; ALL:   $xmm0 = COPY [[LOAD]](s64)
   ; ALL:   RET 0, implicit $xmm0
 entry:
   %retval = alloca %struct.d1, align 8
@@ -71,25 +67,21 @@ define { double, double } @test_return_d2(double %d.coerce0, double %d.coerce1)
   ; ALL-LABEL: name: test_return_d2
   ; ALL: bb.1.entry:
   ; ALL:   liveins: $xmm0, $xmm1
-  ; ALL:   [[COPY:%[0-9]+]]:_(s128) = COPY $xmm0
-  ; ALL:   [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[COPY]](s128)
-  ; ALL:   [[COPY1:%[0-9]+]]:_(s128) = COPY $xmm1
-  ; ALL:   [[TRUNC1:%[0-9]+]]:_(s64) = G_TRUNC [[COPY1]](s128)
+  ; ALL:   [[COPY:%[0-9]+]]:_(s64) = COPY $xmm0
+  ; ALL:   [[COPY1:%[0-9]+]]:_(s64) = COPY $xmm1
   ; ALL:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
   ; ALL:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
   ; ALL:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.d
-  ; ALL:   G_STORE [[TRUNC]](s64), [[FRAME_INDEX1]](p0) :: (store 8 into %ir.1)
+  ; ALL:   G_STORE [[COPY]](s64), [[FRAME_INDEX1]](p0) :: (store 8 into %ir.1)
   ; ALL:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
   ; ALL:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX1]], [[C1]](s64)
-  ; ALL:   G_STORE [[TRUNC1]](s64), [[PTR_ADD]](p0) :: (store 8 into %ir.2)
+  ; ALL:   G_STORE [[COPY1]](s64), [[PTR_ADD]](p0) :: (store 8 into %ir.2)
   ; ALL:   G_MEMCPY [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store 1 into %ir.3, align 8), (load 1 from %ir.4, align 8)
   ; ALL:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load 8 from %ir.5)
   ; ALL:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C1]](s64)
   ; ALL:   [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (dereferenceable load 8 from %ir.5 + 8)
-  ; ALL:   [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[LOAD]](s64)
-  ; ALL:   $xmm0 = COPY [[ANYEXT]](s128)
-  ; ALL:   [[ANYEXT1:%[0-9]+]]:_(s128) = G_ANYEXT [[LOAD1]](s64)
-  ; ALL:   $xmm1 = COPY [[ANYEXT1]](s128)
+  ; ALL:   $xmm0 = COPY [[LOAD]](s64)
+  ; ALL:   $xmm1 = COPY [[LOAD1]](s64)
   ; ALL:   RET 0, implicit $xmm0, implicit $xmm1
 entry:
   %retval = alloca %struct.d2, align 8

diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator.ll b/llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator.ll
index 1d6c8c210952..b158610027c1 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator.ll
@@ -189,11 +189,9 @@ define float @test_fptrunc(double %in) {
   ; CHECK-LABEL: name: test_fptrunc
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $xmm0
-  ; CHECK:   [[COPY:%[0-9]+]]:_(s128) = COPY $xmm0
-  ; CHECK:   [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[COPY]](s128)
-  ; CHECK:   [[FPTRUNC:%[0-9]+]]:_(s32) = G_FPTRUNC [[TRUNC]](s64)
-  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[FPTRUNC]](s32)
-  ; CHECK:   $xmm0 = COPY [[ANYEXT]](s128)
+  ; CHECK:   [[COPY:%[0-9]+]]:_(s64) = COPY $xmm0
+  ; CHECK:   [[FPTRUNC:%[0-9]+]]:_(s32) = G_FPTRUNC [[COPY]](s64)
+  ; CHECK:   $xmm0 = COPY [[FPTRUNC]](s32)
   ; CHECK:   RET 0, implicit $xmm0
   %res = fptrunc double %in to float
   ret float %res
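
For reference, the CHECK lines in these tests capture the IRTranslator's
output in isolation. The RUN lines are not shown in this diff, so the
exact triple and flags below are an assumption based on the common
pattern for these GlobalISel tests:

    llc -mtriple=x86_64-linux-gnu -global-isel -stop-after=irtranslator \
        < llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator.ll -o - \
      | FileCheck llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator.ll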

