[PATCH] D108757: [X86][Codegen] PR51615: don't replace wide volatile load with narrow broadcast-from-memory

Roman Lebedev via Phabricator via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 26 06:55:23 PDT 2021


lebedev.ri updated this revision to Diff 368867.
lebedev.ri marked an inline comment as done.
lebedev.ri added a comment.

Deduplicate comments.


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D108757/new/

https://reviews.llvm.org/D108757

Files:
  llvm/lib/Target/X86/X86ISelLowering.cpp
  llvm/test/CodeGen/X86/pr51615.ll


Index: llvm/test/CodeGen/X86/pr51615.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/X86/pr51615.ll
@@ -0,0 +1,61 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx  | FileCheck %s --check-prefixes=ALL,AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=ALL,AVX2
+
+; https://bugs.llvm.org/show_bug.cgi?id=51615
+; We cannot replace a wide volatile load with a broadcast-from-memory,
+; because that would narrow the load, which isn't legal for volatiles.
+
+ at test5_id1234 = external dso_local global <2 x double>, align 16
+define void @volatile_load_2_elts() {
+; AVX-LABEL: volatile_load_2_elts:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovaps test5_id1234(%rip), %xmm0
+; AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm1
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm1 = ymm1[0,0,3,2]
+; AVX-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3]
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2],ymm0[3]
+; AVX-NEXT:    vmovapd %ymm0, (%rax)
+; AVX-NEXT:    vmovapd %ymm1, (%rax)
+; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    retq
+;
+; AVX2-LABEL: volatile_load_2_elts:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovaps test5_id1234(%rip), %xmm0
+; AVX2-NEXT:    vbroadcastsd %xmm0, %ymm0
+; AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; AVX2-NEXT:    vmovaps %ymm0, (%rax)
+; AVX2-NEXT:    vmovaps %ymm2, (%rax)
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+  %i = load volatile <2 x double>, <2 x double>* @test5_id1234, align 16
+  %i1 = shufflevector <2 x double> %i, <2 x double> poison, <4 x i32> <i32 undef, i32 0, i32 undef, i32 0>
+  %shuffle1 = shufflevector <4 x double> %i1, <4 x double> zeroinitializer, <8 x i32> <i32 6, i32 7, i32 3, i32 6, i32 7, i32 1, i32 7, i32 1>
+  store volatile <8 x double> %shuffle1, <8 x double>* undef, align 64
+  ret void
+}
+
+ at test5_id12345 = external dso_local global <1 x double>, align 16
+define void @volatile_load_1_elt() {
+; ALL-LABEL: volatile_load_1_elt:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vbroadcastsd test5_id12345(%rip), %ymm0
+; ALL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; ALL-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; ALL-NEXT:    vmovaps %ymm0, (%rax)
+; ALL-NEXT:    vmovaps %ymm2, (%rax)
+; ALL-NEXT:    vzeroupper
+; ALL-NEXT:    retq
+  %i = load volatile <1 x double>, <1 x double>* @test5_id12345, align 16
+  %i1 = shufflevector <1 x double> %i, <1 x double> poison, <4 x i32> <i32 undef, i32 0, i32 undef, i32 0>
+  %shuffle1 = shufflevector <4 x double> %i1, <4 x double> zeroinitializer, <8 x i32> <i32 6, i32 7, i32 3, i32 6, i32 7, i32 1, i32 7, i32 1>
+  store volatile <8 x double> %shuffle1, <8 x double>* undef, align 64
+  ret void
+}
Index: llvm/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -5036,6 +5036,17 @@
   return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
 }
 
+static bool MayFoldLoadIntoBroadcastFromMem(SDValue Op, MVT EltVT) {
+  if (!MayFoldLoad(Op))
+    return false;
+
+  // We cannot replace a wide volatile load with a broadcast-from-memory,
+  // because that would narrow the load, which isn't legal for volatiles.
+  const LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op.getNode());
+  return !Ld->isVolatile() ||
+         Ld->getValueSizeInBits(0) == EltVT.getScalarSizeInBits();
+}
+
 static bool MayFoldIntoStore(SDValue Op) {
   return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
 }
@@ -50876,7 +50887,8 @@
 
     // concat_vectors(movddup(x),movddup(x)) -> broadcast(x)
     if (Op0.getOpcode() == X86ISD::MOVDDUP && VT == MVT::v4f64 &&
-        (Subtarget.hasAVX2() || MayFoldLoad(Op0.getOperand(0))))
+        (Subtarget.hasAVX2() || MayFoldLoadIntoBroadcastFromMem(
+                                    Op0.getOperand(0), VT.getScalarType())))
       return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
                          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f64,
                                      Op0.getOperand(0),


-------------- next part --------------
A non-text attachment was scrubbed...
Name: D108757.368867.patch
Type: text/x-patch
Size: 4648 bytes
Desc: not available
URL: <http://lists.llvm.org/pipermail/llvm-commits/attachments/20210826/56553e08/attachment.bin>


More information about the llvm-commits mailing list