[llvm] SelectionDAG: Do not propagate divergence through copy glue (PR #101210)
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Tue Jul 30 10:45:28 PDT 2024
https://github.com/arsenm created https://github.com/llvm/llvm-project/pull/101210
This fixes DAG divergence mishandling of inline asm.
The divergence calculation was considering the glue nodes themselves,
when the divergence should only come from the individual CopyFromRegs
that are glued. As a result, having any VGPR CopyFromReg would taint
every uniform SGPR copy as divergent, later resulting in copies from
SGPRs into VGPR virtual registers.
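To illustrate (this is the mixed_def_vgpr_sgpr_def_asm test added by the
patch below; nothing here is new beyond the comments): the asm defines a
divergent VGPR value alongside a uniform SGPR pair, both read back through
glued CopyFromReg nodes, and the glue previously tainted the uniform result:

; Reduced from the inline-asm test in this patch. The glued CopyFromReg
; for v0 must not make the s[4:5] result look divergent.
define void @mixed_def_vgpr_sgpr_def_asm() {
  %vgpr_sgpr = call { i32, i64 } asm sideeffect "; def $0 $1 ", "=v,={s[4:5]}"()
  %vgpr = extractvalue { i32, i64 } %vgpr_sgpr, 0
  %sgpr = extractvalue { i32, i64 } %vgpr_sgpr, 1
  ; Uniform use: should select to s_add_u32/s_addc_u32, not VALU adds.
  %sgpr.add = add i64 %sgpr, 2
  call void asm sideeffect "; use $0 ", "{s[4:5]}"(i64 %sgpr.add)
  ret void
}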
From 500b6d68e1dc04f3270cc5dae9a9e0325f333fe2 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Tue, 30 Jul 2024 20:08:59 +0400
Subject: [PATCH] SelectionDAG: Do not propagate divergence through copy glue
This fixes DAG divergence mishandling of inline asm.
The divergence calculation was considering the glue nodes themselves,
when the divergence should only come from the individual CopyFromRegs
that are glued. As a result, having any VGPR CopyFromReg would taint
every uniform SGPR copy as divergent, later resulting in copies from
SGPRs into VGPR virtual registers.
---
.../lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 29 +++++++++++++++--
llvm/test/CodeGen/AMDGPU/dag-divergence.ll | 17 ++++++++++
llvm/test/CodeGen/AMDGPU/inline-asm.ll | 31 +++++++++++++++++++
.../CodeGen/AMDGPU/sdag-print-divergence.ll | 4 +--
.../Inputs/amdgpu_isel.ll.expected | 14 ++++-----
5 files changed, 83 insertions(+), 12 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index daebcdabda984..b3ed7f78eb68e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -11587,6 +11587,19 @@ class RAUOVWUpdateListener : public SelectionDAG::DAGUpdateListener {
} // end anonymous namespace
+/// Return true if a glue output should propagate divergence information.
+static bool gluePropagatesDivergence(const SDNode *Node) {
+ switch (Node->getOpcode()) {
+ case ISD::CopyFromReg:
+ case ISD::CopyToReg:
+ return false;
+ default:
+ return true;
+ }
+
+ llvm_unreachable("covered opcode switch");
+}
+
bool SelectionDAG::calculateDivergence(SDNode *N) {
if (TLI->isSDNodeAlwaysUniform(N)) {
assert(!TLI->isSDNodeSourceOfDivergence(N, FLI, UA) &&
@@ -11596,7 +11609,11 @@ bool SelectionDAG::calculateDivergence(SDNode *N) {
if (TLI->isSDNodeSourceOfDivergence(N, FLI, UA))
return true;
for (const auto &Op : N->ops()) {
- if (Op.Val.getValueType() != MVT::Other && Op.getNode()->isDivergent())
+ EVT VT = Op.getValueType();
+
+ // Skip Chain. It does not carry divergence.
+ if (VT != MVT::Other && Op.getNode()->isDivergent() &&
+ (VT != MVT::Glue || gluePropagatesDivergence(Op.getNode())))
return true;
}
return false;
@@ -13135,8 +13152,14 @@ void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
for (unsigned I = 0; I != Vals.size(); ++I) {
Ops[I].setUser(Node);
Ops[I].setInitial(Vals[I]);
- if (Ops[I].Val.getValueType() != MVT::Other) // Skip Chain. It does not carry divergence.
- IsDivergent |= Ops[I].getNode()->isDivergent();
+ EVT VT = Ops[I].getValueType();
+
+ // Skip Chain. It does not carry divergence.
+ if (VT != MVT::Other &&
+ (VT != MVT::Glue || gluePropagatesDivergence(Ops[I].getNode())) &&
+ Ops[I].getNode()->isDivergent()) {
+ IsDivergent = true;
+ }
}
Node->NumOperands = Vals.size();
Node->OperandList = Ops;
diff --git a/llvm/test/CodeGen/AMDGPU/dag-divergence.ll b/llvm/test/CodeGen/AMDGPU/dag-divergence.ll
index b0b9bbe4835ed..dfc28539ea814 100644
--- a/llvm/test/CodeGen/AMDGPU/dag-divergence.ll
+++ b/llvm/test/CodeGen/AMDGPU/dag-divergence.ll
@@ -28,3 +28,20 @@ define amdgpu_kernel void @flat_load_maybe_divergent(ptr addrspace(4) %k, ptr %f
store i32 %maybe.not.uniform.load, ptr addrspace(1) undef
ret void
}
+
+; This decomposes into a sequence of divergent subtracts with carry. The
+; first subs in the sequence are divergent due to their value inputs, but
+; the last values are divergent due to the carry-in glue (so divergence
+; still needs to propagate through glue when the glued node has any
+; non-void outputs).
+; GCN-LABEL: {{^}}wide_carry_divergence_error:
+; GCN: v_sub_u32_e32
+; GCN: v_subb_u32_e32
+; GCN: v_subbrev_u32_e32
+; GCN: v_subbrev_u32_e32
+define <2 x i128> @wide_carry_divergence_error(i128 %arg) {
+ %i = call i128 @llvm.ctlz.i128(i128 %arg, i1 false)
+ %i1 = sub i128 0, %i
+ %i2 = insertelement <2 x i128> zeroinitializer, i128 %i1, i64 0
+ ret <2 x i128> %i2
+}
diff --git a/llvm/test/CodeGen/AMDGPU/inline-asm.ll b/llvm/test/CodeGen/AMDGPU/inline-asm.ll
index dfb696fb3e29a..881433f48c76f 100644
--- a/llvm/test/CodeGen/AMDGPU/inline-asm.ll
+++ b/llvm/test/CodeGen/AMDGPU/inline-asm.ll
@@ -332,3 +332,34 @@ define void @scc_as_i1() {
call void asm sideeffect "; use $0 ", "{scc}"(i1 %scc)
ret void
}
+
+; Make sure the SGPR def is treated as a uniform value when the inline
+; assembly also defines a divergent value. The add should be selected as
+; scalar code and not introduce illegal VGPR-to-SGPR copies.
+; CHECK-LABEL: {{^}}mixed_def_vgpr_sgpr_def_asm:
+; CHECK: ; def v0 s[4:5]
+; CHECK: s_add_u32
+; CHECK-NEXT: s_addc_u32
+; CHECK: ; use s[4:5]
+define void @mixed_def_vgpr_sgpr_def_asm() {
+ %vgpr_sgpr = call { i32, i64 } asm sideeffect "; def $0 $1 ", "=v,={s[4:5]}"()
+ %vgpr = extractvalue { i32, i64 } %vgpr_sgpr, 0
+ %sgpr = extractvalue { i32, i64 } %vgpr_sgpr, 1
+ %sgpr.add = add i64 %sgpr, 2
+ call void asm sideeffect "; use $0 ", "{s[4:5]}"(i64 %sgpr.add)
+ ret void
+}
+
+; CHECK-LABEL: {{^}}mixed_def_sgpr_vgpr_def_asm:
+; CHECK: ; def s[4:5] v0
+; CHECK: s_add_u32
+; CHECK-NEXT: s_addc_u32
+; CHECK: ; use s[4:5]
+define void @mixed_def_sgpr_vgpr_def_asm() {
+ %sgpr_vgpr = call { i64, i32 } asm sideeffect "; def $0 $1 ", "={s[4:5]},=v"()
+ %sgpr = extractvalue { i64, i32 } %sgpr_vgpr, 0
+ %vgpr = extractvalue { i64, i32 } %sgpr_vgpr, 1
+ %sgpr.add = add i64 %sgpr, 2
+ call void asm sideeffect "; use $0 ", "{s[4:5]}"(i64 %sgpr.add)
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/sdag-print-divergence.ll b/llvm/test/CodeGen/AMDGPU/sdag-print-divergence.ll
index a3489a8bb1403..695d5225421de 100644
--- a/llvm/test/CodeGen/AMDGPU/sdag-print-divergence.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdag-print-divergence.ll
@@ -13,7 +13,7 @@
; GCN-DEFAULT: t4: f32,ch = CopyFromReg # D:1 t0, Register:f32 %1
; GCN-DEFAULT: t6: f32 = fadd # D:1 t5, t4
; GCN-DEFAULT: t8: ch,glue = CopyToReg # D:1 t0, Register:f32 $vgpr0, t6
-; GCN-DEFAULT: t9: ch = RETURN_TO_EPILOG # D:1 t8, Register:f32 $vgpr0, t8:1
+; GCN-DEFAULT: t9: ch = RETURN_TO_EPILOG t8, Register:f32 $vgpr0, t8:1
; GCN-VERBOSE: t0: ch,glue = EntryToken # D:0
; GCN-VERBOSE: t2: f32,ch = CopyFromReg [ORD=1] # D:0 t0, Register:f32 %0 # D:0
@@ -21,7 +21,7 @@
; GCN-VERBOSE: t4: f32,ch = CopyFromReg [ORD=1] # D:1 t0, Register:f32 %1 # D:0
; GCN-VERBOSE: t6: f32 = fadd [ORD=3] # D:1 t5, t4
; GCN-VERBOSE: t8: ch,glue = CopyToReg [ORD=4] # D:1 t0, Register:f32 $vgpr0 # D:0, t6
-; GCN-VERBOSE: t9: ch = RETURN_TO_EPILOG [ORD=4] # D:1 t8, Register:f32 $vgpr0 # D:0, t8:1
+; GCN-VERBOSE: t9: ch = RETURN_TO_EPILOG [ORD=4] # D:0 t8, Register:f32 $vgpr0 # D:0, t8:1
define amdgpu_ps float @test_sdag_dump(float inreg %scalar, float %vector) {
entry:
diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/amdgpu_isel.ll.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/amdgpu_isel.ll.expected
index 38b8ba12f0662..bdba243634689 100644
--- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/amdgpu_isel.ll.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/amdgpu_isel.ll.expected
@@ -16,7 +16,7 @@ define i64 @i64_test(i64 %i) nounwind readnone {
; CHECK-NEXT: t16: ch,glue = CopyToReg # D:1 t0, Register:i32 $vgpr0, t23
; CHECK-NEXT: t38: i32 = EXTRACT_SUBREG # D:1 t10, TargetConstant:i32<11>
; CHECK-NEXT: t18: ch,glue = CopyToReg # D:1 t16, Register:i32 $vgpr1, t38, t16:1
-; CHECK-NEXT: t19: ch = SI_RETURN # D:1 Register:i32 $vgpr0, Register:i32 $vgpr1, t18, t18:1
+; CHECK-NEXT: t19: ch = SI_RETURN Register:i32 $vgpr0, Register:i32 $vgpr1, t18, t18:1
; CHECK-EMPTY:
%loc = alloca i64, addrspace(5)
%j = load i64, ptr addrspace(5) %loc
@@ -33,8 +33,8 @@ define i64 @i32_test(i32 %i) nounwind readnone {
; CHECK-NEXT: t7: i32,i1 = V_ADD_CO_U32_e64 # D:1 t2, t6, TargetConstant:i1<0>
; CHECK-NEXT: t14: ch,glue = CopyToReg # D:1 t0, Register:i32 $vgpr0, t7
; CHECK-NEXT: t22: i32 = V_MOV_B32_e32 TargetConstant:i32<0>
-; CHECK-NEXT: t16: ch,glue = CopyToReg # D:1 t14, Register:i32 $vgpr1, t22, t14:1
-; CHECK-NEXT: t17: ch = SI_RETURN # D:1 Register:i32 $vgpr0, Register:i32 $vgpr1, t16, t16:1
+; CHECK-NEXT: t16: ch,glue = CopyToReg t14, Register:i32 $vgpr1, t22, t14:1
+; CHECK-NEXT: t17: ch = SI_RETURN Register:i32 $vgpr0, Register:i32 $vgpr1, t16, t16:1
; CHECK-EMPTY:
%loc = alloca i32, addrspace(5)
%j = load i32, ptr addrspace(5) %loc
@@ -54,8 +54,8 @@ define i64 @i16_test(i16 %i) nounwind readnone {
; CHECK-NEXT: t25: i32 = V_AND_B32_e64 # D:1 t20, t24
; CHECK-NEXT: t15: ch,glue = CopyToReg # D:1 t0, Register:i32 $vgpr0, t25
; CHECK-NEXT: t31: i32 = V_MOV_B32_e32 TargetConstant:i32<0>
-; CHECK-NEXT: t17: ch,glue = CopyToReg # D:1 t15, Register:i32 $vgpr1, t31, t15:1
-; CHECK-NEXT: t18: ch = SI_RETURN # D:1 Register:i32 $vgpr0, Register:i32 $vgpr1, t17, t17:1
+; CHECK-NEXT: t17: ch,glue = CopyToReg t15, Register:i32 $vgpr1, t31, t15:1
+; CHECK-NEXT: t18: ch = SI_RETURN Register:i32 $vgpr0, Register:i32 $vgpr1, t17, t17:1
; CHECK-EMPTY:
%loc = alloca i16, addrspace(5)
%j = load i16, ptr addrspace(5) %loc
@@ -75,8 +75,8 @@ define i64 @i8_test(i8 %i) nounwind readnone {
; CHECK-NEXT: t25: i32 = V_AND_B32_e64 # D:1 t20, t24
; CHECK-NEXT: t15: ch,glue = CopyToReg # D:1 t0, Register:i32 $vgpr0, t25
; CHECK-NEXT: t31: i32 = V_MOV_B32_e32 TargetConstant:i32<0>
-; CHECK-NEXT: t17: ch,glue = CopyToReg # D:1 t15, Register:i32 $vgpr1, t31, t15:1
-; CHECK-NEXT: t18: ch = SI_RETURN # D:1 Register:i32 $vgpr0, Register:i32 $vgpr1, t17, t17:1
+; CHECK-NEXT: t17: ch,glue = CopyToReg t15, Register:i32 $vgpr1, t31, t15:1
+; CHECK-NEXT: t18: ch = SI_RETURN Register:i32 $vgpr0, Register:i32 $vgpr1, t17, t17:1
; CHECK-EMPTY:
%loc = alloca i8, addrspace(5)
%j = load i8, ptr addrspace(5) %loc