[llvm] r221407 - [X86][SSE] Vector integer to float conversion memory folding

Simon Pilgrim llvm-dev at redking.me.uk
Wed Nov 5 14:28:25 PST 2014


Author: rksimon
Date: Wed Nov  5 16:28:25 2014
New Revision: 221407

URL: http://llvm.org/viewvc/llvm-project?rev=221407&view=rev
Log:
[X86][SSE] Vector integer to float conversion memory folding

Added missing memory folding for the (V)CVTDQ2PS instructions - we can safely fold these (but not the (V)CVTDQ2PD versions, which have a register/memory size discrepancy in the source operand). I've added a test case demonstrating that stack folding now works.
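
The register/memory size discrepancy is visible in the standard SSE2 intrinsics for these instructions; below is a minimal illustrative C++ sketch (the function names are mine, not from the commit):

    #include <immintrin.h>

    // CVTDQ2PS: four packed i32 -> four packed floats. The register form
    // reads a full 128-bit source and the memory form loads a full 128
    // bits (m128), so a 16-byte spill slot can be folded directly.
    __m128 cvt_ps(__m128i v) { return _mm_cvtepi32_ps(v); }

    // CVTDQ2PD: two packed i32 -> two packed doubles. The register form
    // takes a 128-bit XMM source but uses only its low 64 bits, while the
    // memory form loads just 64 bits (m64). That mismatch between the
    // register and memory operand sizes is why these are not folded here.
    __m128d cvt_pd(__m128i v) { return _mm_cvtepi32_pd(v); }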

Differential Revision: http://reviews.llvm.org/D5981


Modified:
    llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
    llvm/trunk/test/CodeGen/X86/avx1-stack-reload-folding.ll

Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.cpp?rev=221407&r1=221406&r2=221407&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.cpp Wed Nov  5 16:28:25 2014
@@ -448,6 +448,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget
     { X86::CVTSD2SIrr,      X86::CVTSD2SIrm,          0 },
     { X86::CVTSS2SI64rr,    X86::CVTSS2SI64rm,        0 },
     { X86::CVTSS2SIrr,      X86::CVTSS2SIrm,          0 },
+    { X86::CVTDQ2PSrr,      X86::CVTDQ2PSrm,          TB_ALIGN_16 },
     { X86::CVTTPD2DQrr,     X86::CVTTPD2DQrm,         TB_ALIGN_16 },
     { X86::CVTTPS2DQrr,     X86::CVTTPS2DQrm,         TB_ALIGN_16 },
     { X86::Int_CVTTSD2SI64rr,X86::Int_CVTTSD2SI64rm,  0 },
@@ -526,6 +527,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget
     { X86::VCVTSD2SIrr,     X86::VCVTSD2SIrm,         0 },
     { X86::VCVTSS2SI64rr,   X86::VCVTSS2SI64rm,       0 },
     { X86::VCVTSS2SIrr,     X86::VCVTSS2SIrm,         0 },
+    { X86::VCVTDQ2PSrr,     X86::VCVTDQ2PSrm,         0 },
     { X86::VMOV64toPQIrr,   X86::VMOVQI2PQIrm,        0 },
     { X86::VMOV64toSDrr,    X86::VMOV64toSDrm,        0 },
     { X86::VMOVAPDrr,       X86::VMOVAPDrm,           TB_ALIGN_16 },
@@ -559,6 +561,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget
     { X86::VBROADCASTSSrr,  X86::VBROADCASTSSrm,      TB_NO_REVERSE },
 
     // AVX 256-bit foldable instructions
+    { X86::VCVTDQ2PSYrr,    X86::VCVTDQ2PSYrm,        0 },
     { X86::VMOVAPDYrr,      X86::VMOVAPDYrm,          TB_ALIGN_32 },
     { X86::VMOVAPSYrr,      X86::VMOVAPSYrm,          TB_ALIGN_32 },
     { X86::VMOVDQAYrr,      X86::VMOVDQAYrm,          TB_ALIGN_32 },
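
Each row of these folding tables pairs a register-form opcode with its memory-form opcode, plus flags such as TB_ALIGN_16 recording that the folded memory operand must be 16-byte aligned (the VEX entries use 0, since AVX memory operands do not require alignment). A minimal standalone sketch of the lookup idea, using hypothetical stand-ins for the real X86:: opcode enums and TB_ flags:

    #include <cstdint>
    #include <unordered_map>

    // Hypothetical stand-ins for the real opcode enums and alignment flags.
    enum Opcode : uint16_t { CVTDQ2PSrr, CVTDQ2PSrm, INVALID_OP };
    enum : uint16_t { TB_ALIGN_NONE = 0, TB_ALIGN_16 = 16 };

    struct MemoryFoldEntry {
      Opcode MemOpcode;    // memory form to rewrite to
      uint16_t MinAlign;   // minimum alignment of the folded operand
    };

    // Built once from rows like { CVTDQ2PSrr, CVTDQ2PSrm, TB_ALIGN_16 }.
    static const std::unordered_map<uint16_t, MemoryFoldEntry> RegToMemTable = {
      { CVTDQ2PSrr, { CVTDQ2PSrm, TB_ALIGN_16 } },
    };

    // Return the memory form if the spill slot is sufficiently aligned,
    // otherwise report that no fold is possible.
    Opcode foldToMemoryForm(Opcode RegOp, unsigned SlotAlign) {
      auto It = RegToMemTable.find(RegOp);
      if (It == RegToMemTable.end() || SlotAlign < It->second.MinAlign)
        return INVALID_OP;
      return It->second.MemOpcode;
    }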

Modified: llvm/trunk/test/CodeGen/X86/avx1-stack-reload-folding.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx1-stack-reload-folding.ll?rev=221407&r1=221406&r2=221407&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx1-stack-reload-folding.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx1-stack-reload-folding.ll Wed Nov  5 16:28:25 2014
@@ -3,14 +3,32 @@
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-unknown"
 
-; Function Attrs: nounwind readonly uwtable
-define <32 x double> @_Z14vstack_foldDv32_dS_(<32 x double> %a, <32 x double> %b) #0 {
-  %1 = fadd <32 x double> %a, %b
-  %2 = fsub <32 x double> %a, %b
-  %3 = fmul <32 x double> %1, %2
-  ret <32 x double> %3
+; Stack reload folding tests - we use the 'big vectors' pattern to guarantee spilling to stack.
+;
+; Many of these tests are primarily to check memory folding with specific instructions. Using a basic
+; load/cvt/store pattern to test for this would mean that it wouldn't be the memory folding code that's
+; being tested - the load-execute version of the instruction from the tables would be matched instead.
 
-  ;CHECK-NOT:  vmovapd {{.*#+}} 32-byte Reload
+define void @stack_fold_vmulpd(<64 x double>* %a, <64 x double>* %b, <64 x double>* %c) {
   ;CHECK:       vmulpd {{[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
-  ;CHECK-NOT:  vmovapd {{.*#+}} 32-byte Reload
+  %1 = load <64 x double>* %a
+  %2 = load <64 x double>* %b
+  %3 = fadd <64 x double> %1, %2
+  %4 = fsub <64 x double> %1, %2
+  %5 = fmul <64 x double> %3, %4
+  store <64 x double> %5, <64 x double>* %c
+  ret void
+}
+
+define void @stack_fold_cvtdq2ps(<128 x i32>* %a, <128 x i32>* %b, <128 x float>* %c) {
+  ;CHECK:   vcvtdq2ps {{[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+  %1 = load <128 x i32>* %a
+  %2 = load <128 x i32>* %b
+  %3 = and <128 x i32> %1, %2
+  %4 = xor <128 x i32> %1, %2
+  %5 = sitofp <128 x i32> %3 to <128 x float>
+  %6 = sitofp <128 x i32> %4 to <128 x float>
+  %7 = fadd <128 x float> %5, %6
+  store <128 x float> %7, <128 x float>* %c
+  ret void
 }