[llvm] r364498 - [X86] Teach selectScalarSSELoad to not narrow volatile loads.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Wed Jun 26 22:51:56 PDT 2019
Author: ctopper
Date: Wed Jun 26 22:51:56 2019
New Revision: 364498
URL: http://llvm.org/viewvc/llvm-project?rev=364498&view=rev
Log:
[X86] Teach selectScalarSSELoad to not narrow volatile loads.
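Narrowing here means shrinking the memory access itself: when only the
scalar lane of a full 128-bit vector load feeds a scalar SSE instruction,
the load can be folded into that instruction and reduced to a 32-bit (or
64-bit) access. That is fine for ordinary loads, but a volatile load must
be performed with exactly the width the program wrote, so it must not be
narrowed. A minimal IR sketch of the two cases, modeled on the new tests
below (function names are illustrative, not from the tree):

    ; Non-volatile: the 16-byte load may be folded into sqrtss and
    ; narrowed to a 4-byte access (the pre-existing sqrtss_full_size
    ; test covers this case).
    define <4 x float> @fold_ok(<4 x float>* %a) optsize {
      %ld = load <4 x float>, <4 x float>* %a
      %res = tail call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %ld)
      ret <4 x float> %res
    }

    ; Volatile: after this patch the full 16-byte access is preserved,
    ; giving   movaps (%rdi), %xmm0   followed by   sqrtss %xmm0, %xmm0.
    define <4 x float> @no_fold(<4 x float>* %a) optsize {
      %ld = load volatile <4 x float>, <4 x float>* %a
      %res = tail call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %ld)
      ret <4 x float> %res
    }

    declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>)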
Modified:
llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
llvm/trunk/test/CodeGen/X86/fold-load-unops.ll
Modified: llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp?rev=364498&r1=364497&r2=364498&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp Wed Jun 26 22:51:56 2019
@@ -2283,12 +2283,14 @@ bool X86DAGToDAGISel::selectScalarSSELoa
if (!hasSingleUsesFromRoot(Root, Parent))
return false;
- // We can allow a full vector load here since narrowing a load is ok.
+ // We can allow a full vector load here since narrowing a load is ok unless
+ // it's volatile.
if (ISD::isNON_EXTLoad(N.getNode())) {
- PatternNodeWithChain = N;
- if (IsProfitableToFold(PatternNodeWithChain, N.getNode(), Root) &&
- IsLegalToFold(PatternNodeWithChain, Parent, Root, OptLevel)) {
- LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ if (!LD->isVolatile() &&
+ IsProfitableToFold(N, LD, Root) &&
+ IsLegalToFold(N, Parent, Root, OptLevel)) {
+ PatternNodeWithChain = N;
return selectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp,
Segment);
}
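Note the restructuring in this hunk: the LoadSDNode cast is now done up
front so the volatility check can run before the profitability and
legality queries, and PatternNodeWithChain is assigned only once the fold
is known to proceed.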
Modified: llvm/trunk/test/CodeGen/X86/fold-load-unops.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-load-unops.ll?rev=364498&r1=364497&r2=364498&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-load-unops.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-load-unops.ll Wed Jun 26 22:51:56 2019
@@ -179,6 +179,23 @@ define <4 x float> @sqrtss_full_size(<4
ret <4 x float> %res
}
+define <4 x float> @sqrtss_full_size_volatile(<4 x float>* %a) optsize {
+; SSE-LABEL: sqrtss_full_size_volatile:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps (%rdi), %xmm0
+; SSE-NEXT: sqrtss %xmm0, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: sqrtss_full_size_volatile:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovaps (%rdi), %xmm0
+; AVX-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %ld = load volatile <4 x float>, <4 x float>* %a
+ %res = tail call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %ld)
+ ret <4 x float> %res
+}
+
define double @sqrtsd_size(double* %a) optsize {
; SSE-LABEL: sqrtsd_size:
; SSE: # %bb.0:
@@ -212,6 +229,23 @@ define <2 x double> @sqrtsd_full_size(<2
%res = tail call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %ld)
ret <2 x double> %res
}
+
+define <2 x double> @sqrtsd_full_size_volatile(<2 x double>* %a) optsize {
+; SSE-LABEL: sqrtsd_full_size_volatile:
+; SSE: # %bb.0:
+; SSE-NEXT: movapd (%rdi), %xmm0
+; SSE-NEXT: sqrtsd %xmm0, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: sqrtsd_full_size_volatile:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovapd (%rdi), %xmm0
+; AVX-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %ld = load volatile <2 x double>, <2 x double>* %a
+ %res = tail call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %ld)
+ ret <2 x double> %res
+}
declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone
declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone