[llvm] 00b22df - AMDGPU: Fix extra type mangling on llvm.amdgcn.if.break
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Mon Feb 3 07:15:00 PST 2020
Author: Matt Arsenault
Date: 2020-02-03T07:02:05-08:00
New Revision: 00b22df71d87613dd4e54fa0ece8784a43a47d85
URL: https://github.com/llvm/llvm-project/commit/00b22df71d87613dd4e54fa0ece8784a43a47d85
DIFF: https://github.com/llvm/llvm-project/commit/00b22df71d87613dd4e54fa0ece8784a43a47d85.diff
LOG: AMDGPU: Fix extra type mangling on llvm.amdgcn.if.break
The result and the incoming mask operand have to be the same mask type, so there is no reason to mangle two independent integer types into the intrinsic name.
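The visible effect is on the mangled intrinsic name: an overloaded intrinsic gets one type suffix per independently overloaded type, so tying the mask operand to the result with LLVMMatchType<0> drops the redundant second suffix. A minimal sketch of the before/after declarations, matching the test updates below (wave64 uses i64 masks; a wave32 subtarget would presumably mangle .i32 instead):

; Before: the result and the mask operand were separately overloaded,
; so two suffixes were mangled in even though the types must agree:
declare i64 @llvm.amdgcn.if.break.i64.i64(i1, i64)

; After: LLVMMatchType<0> reuses the result type for the mask operand,
; leaving a single suffix:
declare i64 @llvm.amdgcn.if.break.i64(i1, i64)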
Added:
Modified:
llvm/include/llvm/IR/IntrinsicsAMDGPU.td
llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
llvm/test/CodeGen/AMDGPU/diverge-switch-default.ll
llvm/test/CodeGen/AMDGPU/loop_break.ll
llvm/test/CodeGen/AMDGPU/multilevel-break.ll
llvm/test/CodeGen/AMDGPU/nested-loop-conditions.ll
llvm/test/CodeGen/AMDGPU/si-annotatecfg-multiple-backedges.ll
Removed:
################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index bccab0364e71..207b5b55e4bd 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -1858,7 +1858,7 @@ def int_amdgcn_else : Intrinsic<[llvm_i1_ty, llvm_anyint_ty],
>;
def int_amdgcn_if_break : Intrinsic<[llvm_anyint_ty],
- [llvm_i1_ty, llvm_anyint_ty], [IntrNoMem, IntrConvergent]
+ [llvm_i1_ty, LLVMMatchType<0>], [IntrNoMem, IntrConvergent]
>;
def int_amdgcn_loop : Intrinsic<[llvm_i1_ty],
diff --git a/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp b/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
index 27320472cacb..3c41bf1fef5e 100644
--- a/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
+++ b/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
@@ -153,7 +153,7 @@ void SIAnnotateControlFlow::initialize(Module &M, const GCNSubtarget &ST) {
Else = Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_else,
{ IntMask, IntMask });
IfBreak = Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_if_break,
- { IntMask, IntMask });
+ { IntMask });
Loop = Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_loop, { IntMask });
EndCf = Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_end_cf, { IntMask });
}
diff --git a/llvm/test/CodeGen/AMDGPU/diverge-switch-default.ll b/llvm/test/CodeGen/AMDGPU/diverge-switch-default.ll
index c8e9a4691099..c373cc8d9ae2 100644
--- a/llvm/test/CodeGen/AMDGPU/diverge-switch-default.ll
+++ b/llvm/test/CodeGen/AMDGPU/diverge-switch-default.ll
@@ -38,7 +38,7 @@ sw.epilog:
; CHECK: load i8
; CHECK-NOT: {{ br }}
; CHECK: [[ICMP:%[a-zA-Z0-9._]+]] = icmp eq
-; CHECK: [[IF:%[a-zA-Z0-9._]+]] = call i64 @llvm.amdgcn.if.break.i64.i64(i1 [[ICMP]], i64 [[PHI]])
+; CHECK: [[IF:%[a-zA-Z0-9._]+]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[ICMP]], i64 [[PHI]])
; CHECK: [[LOOP:%[a-zA-Z0-9._]+]] = call i1 @llvm.amdgcn.loop.i64(i64 [[IF]])
; CHECK: br i1 [[LOOP]]
diff --git a/llvm/test/CodeGen/AMDGPU/loop_break.ll b/llvm/test/CodeGen/AMDGPU/loop_break.ll
index ae4578511246..d02d406689a9 100644
--- a/llvm/test/CodeGen/AMDGPU/loop_break.ll
+++ b/llvm/test/CodeGen/AMDGPU/loop_break.ll
@@ -15,7 +15,7 @@
; OPT: br label %Flow
; OPT: Flow:
-; OPT: call i64 @llvm.amdgcn.if.break.i64.i64(
+; OPT: call i64 @llvm.amdgcn.if.break.i64(
; OPT: call i1 @llvm.amdgcn.loop.i64(i64
; OPT: br i1 %{{[0-9]+}}, label %bb9, label %bb1
@@ -84,7 +84,7 @@ bb9:
; OPT: Flow:
; OPT-NEXT: %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
; OPT-NEXT: %tmp3 = phi i1 [ %cmp1, %bb4 ], [ undef, %bb1 ]
-; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break.i64.i64(i1 %tmp3, i64 %phi.broken)
+; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break.i64(i1 %tmp3, i64 %phi.broken)
; OPT-NEXT: %1 = call i1 @llvm.amdgcn.loop.i64(i64 %0)
; OPT-NEXT: br i1 %1, label %bb9, label %bb1
@@ -138,7 +138,7 @@ bb9: ; preds = %Flow
; OPT: Flow:
; OPT-NEXT: %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
; OPT-NEXT: %tmp3 = phi i1 [ %cmp1, %bb4 ], [ icmp ne (i32 addrspace(3)* inttoptr (i32 4 to i32 addrspace(3)*), i32 addrspace(3)* @lds), %bb1 ]
-; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break.i64.i64(i1 %tmp3, i64 %phi.broken)
+; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break.i64(i1 %tmp3, i64 %phi.broken)
; OPT-NEXT: %1 = call i1 @llvm.amdgcn.loop.i64(i64 %0)
; OPT-NEXT: br i1 %1, label %bb9, label %bb1
@@ -189,7 +189,7 @@ bb9: ; preds = %Flow
; OPT: Flow:
; OPT-NEXT: %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
; OPT-NEXT: %tmp3 = phi i1 [ %cmp1, %bb4 ], [ true, %bb1 ]
-; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break.i64.i64(i1 %tmp3, i64 %phi.broken)
+; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break.i64(i1 %tmp3, i64 %phi.broken)
; OPT-NEXT: %1 = call i1 @llvm.amdgcn.loop.i64(i64 %0)
; OPT-NEXT: br i1 %1, label %bb9, label %bb1
@@ -239,7 +239,7 @@ bb9: ; preds = %Flow
; OPT: Flow:
; OPT-NEXT: %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
; OPT-NEXT: %tmp3 = phi i1 [ %cmp1, %bb4 ], [ false, %bb1 ]
-; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break.i64.i64(i1 %tmp3, i64 %phi.broken)
+; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break.i64(i1 %tmp3, i64 %phi.broken)
; OPT-NEXT: %1 = call i1 @llvm.amdgcn.loop.i64(i64 %0)
; OPT-NEXT: br i1 %1, label %bb9, label %bb1
@@ -294,7 +294,7 @@ bb9: ; preds = %Flow
; OPT-NEXT: %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
; OPT-NEXT: %tmp3 = phi i1 [ %cmp1, %bb4 ], [ true, %bb1 ]
; OPT-NEXT: %0 = xor i1 %tmp3, true
-; OPT-NEXT: %1 = call i64 @llvm.amdgcn.if.break.i64.i64(i1 %0, i64 %phi.broken)
+; OPT-NEXT: %1 = call i64 @llvm.amdgcn.if.break.i64(i1 %0, i64 %phi.broken)
; OPT-NEXT: %2 = call i1 @llvm.amdgcn.loop.i64(i64 %1)
; OPT-NEXT: br i1 %2, label %bb9, label %bb1
diff --git a/llvm/test/CodeGen/AMDGPU/multilevel-break.ll b/llvm/test/CodeGen/AMDGPU/multilevel-break.ll
index aa80dea28e10..932e6ce11045 100644
--- a/llvm/test/CodeGen/AMDGPU/multilevel-break.ll
+++ b/llvm/test/CodeGen/AMDGPU/multilevel-break.ll
@@ -28,9 +28,9 @@ define amdgpu_vs void @multi_else_break(<4 x float> %vec, i32 %ub, i32 %cont) {
; OPT-NEXT: [[TMP5:%.*]] = phi i1 [ [[TMP51:%.*]], [[ENDIF]] ], [ true, [[LOOP]] ]
; OPT-NEXT: [[TMP6:%.*]] = phi i1 [ [[TMP11:%.*]], [[ENDIF]] ], [ true, [[LOOP]] ]
; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP3]])
-; OPT-NEXT: [[TMP7]] = call i64 @llvm.amdgcn.if.break.i64.i64(i1 [[TMP6]], i64 [[PHI_BROKEN]])
+; OPT-NEXT: [[TMP7]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[TMP6]], i64 [[PHI_BROKEN]])
; OPT-NEXT: [[TMP8:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP7]])
-; OPT-NEXT: [[TMP9]] = call i64 @llvm.amdgcn.if.break.i64.i64(i1 [[TMP5]], i64 [[PHI_BROKEN2]])
+; OPT-NEXT: [[TMP9]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[TMP5]], i64 [[PHI_BROKEN2]])
; OPT-NEXT: br i1 [[TMP8]], label [[FLOW1]], label [[LOOP]]
; OPT: Flow1:
; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP7]])
@@ -146,7 +146,7 @@ define amdgpu_kernel void @multi_if_break_loop(i32 %arg) #0 {
; OPT: Flow4:
; OPT-NEXT: [[TMP3:%.*]] = phi i1 [ [[TMP12:%.*]], [[FLOW5]] ], [ [[TMP8:%.*]], [[FLOW]] ]
; OPT-NEXT: [[TMP4:%.*]] = phi i1 [ [[TMP13:%.*]], [[FLOW5]] ], [ [[TMP9:%.*]], [[FLOW]] ]
-; OPT-NEXT: [[TMP5]] = call i64 @llvm.amdgcn.if.break.i64.i64(i1 [[TMP3]], i64 [[PHI_BROKEN]])
+; OPT-NEXT: [[TMP5]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[TMP3]], i64 [[PHI_BROKEN]])
; OPT-NEXT: [[TMP6:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP5]])
; OPT-NEXT: br i1 [[TMP6]], label [[FLOW6:%.*]], label [[BB1]]
; OPT: case0:
diff --git a/llvm/test/CodeGen/AMDGPU/nested-loop-conditions.ll b/llvm/test/CodeGen/AMDGPU/nested-loop-conditions.ll
index 48dbb28b4a2b..99bbc608c486 100644
--- a/llvm/test/CodeGen/AMDGPU/nested-loop-conditions.ll
+++ b/llvm/test/CodeGen/AMDGPU/nested-loop-conditions.ll
@@ -24,7 +24,7 @@
; IR: bb10:
; IR-NEXT: %tmp11 = phi i32 [ %6, %Flow ]
; IR-NEXT: %tmp12 = phi i1 [ %5, %Flow ]
-; IR-NEXT: %3 = call i64 @llvm.amdgcn.if.break.i64.i64(i1 %tmp12, i64 %phi.broken)
+; IR-NEXT: %3 = call i64 @llvm.amdgcn.if.break.i64(i1 %tmp12, i64 %phi.broken)
; IR-NEXT: %4 = call i1 @llvm.amdgcn.loop.i64(i64 %3)
; IR-NEXT: br i1 %4, label %bb23, label %bb5
@@ -150,7 +150,7 @@ bb23: ; preds = %bb10
; IR-NEXT: %14 = phi i1 [ %18, %bb21 ], [ false, %bb14 ]
; IR-NEXT: %15 = phi i1 [ false, %bb21 ], [ true, %bb14 ]
; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %10)
-; IR-NEXT: %16 = call i64 @llvm.amdgcn.if.break.i64.i64(i1 %13, i64 %phi.broken)
+; IR-NEXT: %16 = call i64 @llvm.amdgcn.if.break.i64(i1 %13, i64 %phi.broken)
; IR-NEXT: %17 = call i1 @llvm.amdgcn.loop.i64(i64 %16)
; IR-NEXT: br i1 %17, label %Flow2, label %bb14
diff --git a/llvm/test/CodeGen/AMDGPU/si-annotatecfg-multiple-backedges.ll b/llvm/test/CodeGen/AMDGPU/si-annotatecfg-multiple-backedges.ll
index 1d0cb626f013..0fc1f4ba8133 100644
--- a/llvm/test/CodeGen/AMDGPU/si-annotatecfg-multiple-backedges.ll
+++ b/llvm/test/CodeGen/AMDGPU/si-annotatecfg-multiple-backedges.ll
@@ -17,13 +17,13 @@ define amdgpu_kernel void @multiple_backedges(i32 %arg, i32* %arg1) {
; OPT-NEXT: [[TMP4:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP5:%.*]], [[LOOP]] ], [ 0, [[LOOP_END]] ]
; OPT-NEXT: [[TMP5]] = add nsw i32 [[TMP4]], [[TMP]]
; OPT-NEXT: [[TMP6:%.*]] = icmp slt i32 [[ARG]], [[TMP5]]
-; OPT-NEXT: [[TMP0]] = call i64 @llvm.amdgcn.if.break.i64.i64(i1 [[TMP6]], i64 [[PHI_BROKEN]])
+; OPT-NEXT: [[TMP0]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[TMP6]], i64 [[PHI_BROKEN]])
; OPT-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP0]])
; OPT-NEXT: br i1 [[TMP1]], label [[LOOP_END]], label [[LOOP]]
; OPT: loop_end:
; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP0]])
; OPT-NEXT: [[EXIT:%.*]] = icmp sgt i32 [[TMP5]], [[TMP2]]
-; OPT-NEXT: [[TMP7]] = call i64 @llvm.amdgcn.if.break.i64.i64(i1 [[EXIT]], i64 [[PHI_BROKEN1]])
+; OPT-NEXT: [[TMP7]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[EXIT]], i64 [[PHI_BROKEN1]])
; OPT-NEXT: [[TMP3:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP7]])
; OPT-NEXT: br i1 [[TMP3]], label [[LOOP_EXIT:%.*]], label [[LOOP]]
; OPT: loop_exit: