[llvm] 6734018 - [Codegen][X86] EltsFromConsecutiveLoads(): if only have AVX1, ensure that the "load" is actually foldable (PR51615)
Roman Lebedev via llvm-commits
llvm-commits at lists.llvm.org
Fri Aug 27 10:27:06 PDT 2021
Author: Roman Lebedev
Date: 2021-08-27T20:26:53+03:00
New Revision: 6734018041e408fe3cad13f2782d8048437de405
URL: https://github.com/llvm/llvm-project/commit/6734018041e408fe3cad13f2782d8048437de405
DIFF: https://github.com/llvm/llvm-project/commit/6734018041e408fe3cad13f2782d8048437de405.diff
LOG: [Codegen][X86] EltsFromConsecutiveLoads(): if only have AVX1, ensure that the "load" is actually foldable (PR51615)
This fixes another reproducer from https://bugs.llvm.org/show_bug.cgi?id=51615
And again, the fix does not lie in the code added in D105390.
Here, we never check that the "broadcast-from-mem" we create can
actually fold the load. In this reproducer, its operand was not a load
at all:
```
Combining: t16: v8i32 = vector_shuffle<0,u,u,u,0,u,u,u> t14, undef:v8i32
Creating new node: t29: i32 = undef
RepeatLoad:
t8: i32 = truncate t7
  t7: i64 = extract_vector_elt t5, Constant:i64<0>
    t5: v2i64,ch = load<(load (s128) from %ir.arg)> t0, t2, undef:i64
      t2: i64,ch = CopyFromReg t0, Register:i64 %0
        t1: i64 = Register %0
      t4: i64 = undef
    t3: i64 = Constant<0>
Combining: t15: v8i32 = undef
```
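The underlying ISA constraint, for context: on AVX1, VBROADCASTSS/VBROADCASTSD only exist with a memory source operand; the register-source forms arrived with AVX2. So when the splatted scalar is not a load that can be folded as the broadcast's memory operand, an AVX1-only target has no way to select the X86ISD::VBROADCAST node we would create. The following standalone sketch (hypothetical types and helper names, not the real SelectionDAG API) restates the gate this patch adds:
```
#include <cstdio>

// Hypothetical stand-in for the relevant SDValue properties; the real check
// uses ISD::isNormalLoad() plus the one-use / AssumeSingleUse logic shown in
// the diff below.
struct Source {
  bool IsNormalLoad; // plain (non-extending, non-indexed) load?
  bool IsVolatile;   // wide volatile loads must not be narrowed
};

// Rough analogue of MayFoldLoadIntoBroadcastFromMem().
static bool mayFoldIntoBroadcastFromMem(const Source &S) {
  return S.IsNormalLoad && !S.IsVolatile;
}

// Rough analogue of the new gate in EltsFromConsecutiveLoads(): without AVX2
// there is no register-source VBROADCAST, so bail out unless the source can
// be folded as a memory operand.
static bool canFormBroadcast(bool HasAVX2, const Source &S) {
  return HasAVX2 || mayFoldIntoBroadcastFromMem(S);
}

int main() {
  Source Truncate{/*IsNormalLoad=*/false, /*IsVolatile=*/false}; // e.g. t8: truncate
  Source PlainLoad{/*IsNormalLoad=*/true, /*IsVolatile=*/false};
  std::printf("AVX1, truncate source: %d\n", canFormBroadcast(false, Truncate));  // 0: bail out
  std::printf("AVX1, load source:     %d\n", canFormBroadcast(false, PlainLoad)); // 1
  std::printf("AVX2, truncate source: %d\n", canFormBroadcast(true, Truncate));   // 1
  return 0;
}
```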
Reviewed By: RKSimon
Differential Revision: https://reviews.llvm.org/D108821
Added:
Modified:
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/pr51615.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index bda849db9f597..64436da7c0a07 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -5032,12 +5032,13 @@ X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
 // Other Lowering Hooks
 //===----------------------------------------------------------------------===//
 
-static bool MayFoldLoad(SDValue Op) {
-  return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
+static bool MayFoldLoad(SDValue Op, bool AssumeSingleUse = false) {
+  return (AssumeSingleUse || Op.hasOneUse()) && ISD::isNormalLoad(Op.getNode());
 }
 
-static bool MayFoldLoadIntoBroadcastFromMem(SDValue Op, MVT EltVT) {
-  if (!MayFoldLoad(Op))
+static bool MayFoldLoadIntoBroadcastFromMem(SDValue Op, MVT EltVT,
+                                            bool AssumeSingleUse = false) {
+  if (!MayFoldLoad(Op, AssumeSingleUse))
     return false;
 
   // We can not replace a wide volatile load with a broadcast-from-memory,
@@ -8981,6 +8982,11 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
             while (Broadcast.getValueSizeInBits() < VT.getSizeInBits())
               Broadcast = concatSubVectors(Broadcast, Broadcast, DAG, DL);
           } else {
+            if (!Subtarget.hasAVX2() &&
+                !MayFoldLoadIntoBroadcastFromMem(
+                    RepeatLoad, RepeatVT.getScalarType().getSimpleVT(),
+                    /*AssumeSingleUse=*/true))
+              return SDValue();
             Broadcast =
                 DAG.getNode(X86ISD::VBROADCAST, DL, BroadcastVT, RepeatLoad);
           }
diff --git a/llvm/test/CodeGen/X86/pr51615.ll b/llvm/test/CodeGen/X86/pr51615.ll
index f7269310485f5..feccec0a249ee 100644
--- a/llvm/test/CodeGen/X86/pr51615.ll
+++ b/llvm/test/CodeGen/X86/pr51615.ll
@@ -79,3 +79,65 @@ define void @volatile_load_2_elts_bitcast() {
store volatile <8 x double> %shuffle1, <8 x double>* undef, align 64
ret void
}
+
+define void @elts_from_consecutive_loads(<2 x i64>* %arg, i32* %arg12, <8 x i32>* %arg13, float %arg14, i1 %arg15) {
+; AVX-LABEL: elts_from_consecutive_loads:
+; AVX: # %bb.0: # %bb
+; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT: .p2align 4, 0x90
+; AVX-NEXT: .LBB3_1: # %bb16
+; AVX-NEXT: # =>This Loop Header: Depth=1
+; AVX-NEXT: # Child Loop BB3_2 Depth 2
+; AVX-NEXT: testb $1, %cl
+; AVX-NEXT: je .LBB3_1
+; AVX-NEXT: .p2align 4, 0x90
+; AVX-NEXT: .LBB3_2: # %bb17
+; AVX-NEXT: # Parent Loop BB3_1 Depth=1
+; AVX-NEXT: # => This Inner Loop Header: Depth=2
+; AVX-NEXT: movl (%rdi), %eax
+; AVX-NEXT: vbroadcastss (%rdi), %ymm2
+; AVX-NEXT: movl %eax, (%rsi)
+; AVX-NEXT: vmovaps %ymm2, (%rdx)
+; AVX-NEXT: vucomiss %xmm1, %xmm0
+; AVX-NEXT: jne .LBB3_2
+; AVX-NEXT: jp .LBB3_2
+; AVX-NEXT: jmp .LBB3_1
+;
+; AVX2-LABEL: elts_from_consecutive_loads:
+; AVX2: # %bb.0: # %bb
+; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: .p2align 4, 0x90
+; AVX2-NEXT: .LBB3_1: # %bb16
+; AVX2-NEXT: # =>This Loop Header: Depth=1
+; AVX2-NEXT: # Child Loop BB3_2 Depth 2
+; AVX2-NEXT: testb $1, %cl
+; AVX2-NEXT: je .LBB3_1
+; AVX2-NEXT: .p2align 4, 0x90
+; AVX2-NEXT: .LBB3_2: # %bb17
+; AVX2-NEXT: # Parent Loop BB3_1 Depth=1
+; AVX2-NEXT: # => This Inner Loop Header: Depth=2
+; AVX2-NEXT: vmovaps (%rdi), %xmm2
+; AVX2-NEXT: vmovss %xmm2, (%rsi)
+; AVX2-NEXT: vbroadcastss %xmm2, %ymm2
+; AVX2-NEXT: vmovaps %ymm2, (%rdx)
+; AVX2-NEXT: vucomiss %xmm1, %xmm0
+; AVX2-NEXT: jne .LBB3_2
+; AVX2-NEXT: jp .LBB3_2
+; AVX2-NEXT: jmp .LBB3_1
+bb:
+ br label %bb16
+
+bb16: ; preds = %bb17, %bb16, %bb
+ br i1 %arg15, label %bb17, label %bb16
+
+bb17: ; preds = %bb17, %bb16
+ %tmp = load <2 x i64>, <2 x i64>* %arg, align 16
+ %tmp18 = extractelement <2 x i64> %tmp, i32 0
+ %tmp19 = trunc i64 %tmp18 to i32
+ store i32 %tmp19, i32* %arg12, align 4
+ %tmp20 = insertelement <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>, i32 %tmp19, i32 0
+ %tmp21 = shufflevector <8 x i32> %tmp20, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>, <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 0, i32 undef, i32 undef, i32 undef>
+ store <8 x i32> %tmp21, <8 x i32>* %arg13, align 32
+ %tmp22 = fcmp une float %arg14, 0.000000e+00
+ br i1 %tmp22, label %bb17, label %bb16
+}
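For reference on what the new AVX and AVX2 check lines encode: with only AVX1, the splat is now selected as the memory form vbroadcastss (%rdi), with the scalar store fed by a separate movl reload, whereas AVX2 can load once into an XMM register and use the register-source vbroadcastss. Semantically, both compute a splat of the truncated low element of the <2 x i64> load; a minimal scalar C++ model of that value (purely illustrative, not part of the patch) is:
```
#include <array>
#include <cstdint>
#include <cstdio>

// Scalar model of the test's inner-loop value: take lane 0 of the
// <2 x i64> load, truncate it to i32, and splat it. The shuffle mask
// <0,u,u,u,0,u,u,u> only pins lanes 0 and 4, so lowering it as a full
// 8-lane splat (vbroadcastss) is a legal choice.
static std::array<uint32_t, 8> splatTruncatedLane0(const uint64_t (&Mem)[2]) {
  const uint32_t Elt = static_cast<uint32_t>(Mem[0]); // trunc i64 -> i32
  std::array<uint32_t, 8> Out;
  Out.fill(Elt);
  return Out;
}

int main() {
  const uint64_t Mem[2] = {0x11223344aabbccddULL, 0};
  const auto V = splatTruncatedLane0(Mem);
  std::printf("lane0=%08x lane7=%08x\n", unsigned(V[0]), unsigned(V[7])); // aabbccdd aabbccdd
  return 0;
}
```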