[llvm] 08131c7 - [AMDGPU] Fix a miscompile with S_ADD/S_SUB

Piotr Sobczak via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 17 03:26:41 PST 2021


Author: Piotr Sobczak
Date: 2021-02-17T12:24:58+01:00
New Revision: 08131c7439336e84b9df3e37b3aaeb76b8f60702

URL: https://github.com/llvm/llvm-project/commit/08131c7439336e84b9df3e37b3aaeb76b8f60702
DIFF: https://github.com/llvm/llvm-project/commit/08131c7439336e84b9df3e37b3aaeb76b8f60702.diff

LOG: [AMDGPU] Fix a miscompile with S_ADD/S_SUB

The helper function isBoolSGPR is too aggressive when determining
whether a v_cndmask can be skipped on a boolean value, because the
function does not check the operands of and/or/xor.

This is problematic for the Add/Sub combines: an and/or/xor whose
operands are not themselves booleans can leave bits set even for
inactive lanes, leading to wrong results.

Fix this by inspecting the operands of and/or/xor recursively.
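For illustration, a minimal sketch of the hazard (hypothetical IR,
modeled on the combine_add_zext_xor test added below; the i1 reaches
the xor through a phi rather than directly from a setcc):

  define i32 @sketch(i32 %x, i32 %v) {
  .entry:
    %cmp = icmp eq i32 %v, 0
    br label %join

  join:                                   ; preds = %.entry
    %cc = phi i1 [ %cmp, %.entry ]        ; not a setcc at this use
    %not = xor i1 %cc, true               ; s_xor_b32 may set inactive-lane bits
    %z = zext i1 %not to i32
    %r = add i32 %x, %z                   ; add combine would read the raw mask
    ret i32 %r
  }

Previously isBoolSGPR accepted the xor unconditionally, so the Add/Sub
combines could fold the zext away and consume the underlying lane mask
directly, including bits belonging to inactive lanes. With the recursive
operand check below, the value is first normalized with
v_cndmask_b32_e64 ..., 0, 1, ..., which is what the new test expects.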

Differential Revision: https://reviews.llvm.org/D86878

Added: 
    llvm/test/CodeGen/AMDGPU/combine-add-zext-xor.ll

Modified: 
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 72555e896e37..ca2b50509bcd 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -8814,18 +8814,20 @@ SDValue SITargetLowering::splitBinaryBitConstantOp(
 }
 
 // Returns true if argument is a boolean value which is not serialized into
-// memory or argument and does not require v_cmdmask_b32 to be deserialized.
+// memory or argument and does not require v_cndmask_b32 to be deserialized.
 static bool isBoolSGPR(SDValue V) {
   if (V.getValueType() != MVT::i1)
     return false;
   switch (V.getOpcode()) {
-  default: break;
+  default:
+    break;
   case ISD::SETCC:
+  case AMDGPUISD::FP_CLASS:
+    return true;
   case ISD::AND:
   case ISD::OR:
   case ISD::XOR:
-  case AMDGPUISD::FP_CLASS:
-    return true;
+    return isBoolSGPR(V.getOperand(0)) && isBoolSGPR(V.getOperand(1));
   }
   return false;
 }

diff  --git a/llvm/test/CodeGen/AMDGPU/combine-add-zext-xor.ll b/llvm/test/CodeGen/AMDGPU/combine-add-zext-xor.ll
new file mode 100644
index 000000000000..48a57ac9d6eb
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/combine-add-zext-xor.ll
@@ -0,0 +1,203 @@
+; RUN: llc -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck %s
+
+; Test that unused lanes in the s_xor result are masked out with v_cndmask.
+
+; CHECK-LABEL: combine_add_zext_xor:
+; CHECK: s_xor_b32 [[RESULT:s[0-9]+]]
+; CHECK: v_cndmask_b32_e64 [[ARG:v[0-9]+]], 0, 1, [[RESULT]]
+; CHECK: v_add_nc_u32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[ARG]]
+define i32 @combine_add_zext_xor() {
+.entry:
+  br label %.a
+
+.a:                                               ; preds = %bb9, %.entry
+  %.2 = phi i32 [ 0, %.entry ], [ %i11, %bb9 ]
+  br i1 undef, label %bb9, label %bb
+
+bb:                                               ; preds = %.a
+  %.i3 = call i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32> undef, i32 %.2, i32 64, i32 1)
+  %i5 = icmp eq i32 %.i3, 0
+  br label %bb9
+
+bb9:                                              ; preds = %bb, %.a
+  %.2.0.in.in = phi i1 [ %i5, %bb ], [ undef, %.a ]
+  %.2.0.in = xor i1 %.2.0.in.in, true
+  %.2.0 = zext i1 %.2.0.in to i32
+  %i11 = add i32 %.2, %.2.0
+  %i12 = icmp sgt i32 %.2, -1050
+  br i1 %i12, label %.a, label %.exit
+
+.exit:                                            ; preds = %bb9
+  ret i32 %.2.0
+}
+
+; Test that unused lanes in the s_xor result are masked out with v_cndmask.
+
+; CHECK-LABEL: combine_sub_zext_xor:
+; CHECK: s_xor_b32 [[RESULT:s[0-9]+]]
+; CHECK: v_cndmask_b32_e64 [[ARG:v[0-9]+]], 0, 1, [[RESULT]]
+; CHECK: v_sub_nc_u32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[ARG]]
+
+define i32 @combine_sub_zext_xor() {
+.entry:
+  br label %.a
+
+.a:                                               ; preds = %bb9, %.entry
+  %.2 = phi i32 [ 0, %.entry ], [ %i11, %bb9 ]
+  br i1 undef, label %bb9, label %bb
+
+bb:                                               ; preds = %.a
+  %.i3 = call i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32> undef, i32 %.2, i32 64, i32 1)
+  %i5 = icmp eq i32 %.i3, 0
+  br label %bb9
+
+bb9:                                              ; preds = %bb, %.a
+  %.2.0.in.in = phi i1 [ %i5, %bb ], [ undef, %.a ]
+  %.2.0.in = xor i1 %.2.0.in.in, true
+  %.2.0 = zext i1 %.2.0.in to i32
+  %i11 = sub i32 %.2, %.2.0
+  %i12 = icmp sgt i32 %.2, -1050
+  br i1 %i12, label %.a, label %.exit
+
+.exit:                                            ; preds = %bb9
+  ret i32 %.2.0
+}
+
+; Test that unused lanes in the s_or result are masked out with v_cndmask.
+
+; CHECK-LABEL: combine_add_zext_or:
+; CHECK: s_or_b32 [[RESULT:s[0-9]+]]
+; CHECK: v_cndmask_b32_e64 [[ARG:v[0-9]+]], 0, 1, [[RESULT]]
+; CHECK: v_add_nc_u32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[ARG]]
+
+define i32 @combine_add_zext_or() {
+.entry:
+  br label %.a
+
+.a:                                               ; preds = %bb9, %.entry
+  %.2 = phi i32 [ 0, %.entry ], [ %i11, %bb9 ]
+  br i1 undef, label %bb9, label %bb
+
+bb:                                               ; preds = %.a
+  %.i3 = call i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32> undef, i32 %.2, i32 64, i32 1)
+  %i5 = icmp eq i32 %.i3, 0
+  br label %bb9
+
+bb9:                                              ; preds = %bb, %.a
+  %.2.0.in.in = phi i1 [ %i5, %bb ], [ undef, %.a ]
+  %t = icmp sgt i32 %.2, -1050
+  %.2.0.in = or i1 %.2.0.in.in, %t
+  %.2.0 = zext i1 %.2.0.in to i32
+  %i11 = add i32 %.2, %.2.0
+  %i12 = icmp sgt i32 %.2, -1050
+  br i1 %i12, label %.a, label %.exit
+
+.exit:                                            ; preds = %bb9
+  ret i32 %.2.0
+}
+
+; Test that unused lanes in the s_or result are masked out with v_cndmask.
+
+; CHECK-LABEL: combine_sub_zext_or:
+; CHECK: s_or_b32 [[RESULT:s[0-9]+]]
+; CHECK: v_cndmask_b32_e64 [[ARG:v[0-9]+]], 0, 1, [[RESULT]]
+; CHECK: v_sub_nc_u32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[ARG]]
+
+define i32 @combine_sub_zext_or() {
+.entry:
+  br label %.a
+
+.a:                                               ; preds = %bb9, %.entry
+  %.2 = phi i32 [ 0, %.entry ], [ %i11, %bb9 ]
+  br i1 undef, label %bb9, label %bb
+
+bb:                                               ; preds = %.a
+  %.i3 = call i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32> undef, i32 %.2, i32 64, i32 1)
+  %i5 = icmp eq i32 %.i3, 0
+  br label %bb9
+
+bb9:                                              ; preds = %bb, %.a
+  %.2.0.in.in = phi i1 [ %i5, %bb ], [ undef, %.a ]
+  %t = icmp sgt i32 %.2, -1050
+  %.2.0.in = or i1 %.2.0.in.in, %t
+  %.2.0 = zext i1 %.2.0.in to i32
+  %i11 = sub i32 %.2, %.2.0
+  %i12 = icmp sgt i32 %.2, -1050
+  br i1 %i12, label %.a, label %.exit
+
+.exit:                                            ; preds = %bb9
+  ret i32 %.2.0
+}
+
+; Test that unused lanes in the s_and result are masked out with v_cndmask.
+
+; CHECK-LABEL: combine_add_zext_and:
+; CHECK: s_and_b32 [[RESULT:s[0-9]+]]
+; CHECK: v_cndmask_b32_e64 [[ARG:v[0-9]+]], 0, 1, [[RESULT]]
+; CHECK: v_add_nc_u32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[ARG]]
+
+define i32 @combine_add_zext_and() {
+.entry:
+  br label %.a
+
+.a:                                               ; preds = %bb9, %.entry
+  %.2 = phi i32 [ 0, %.entry ], [ %i11, %bb9 ]
+  br i1 undef, label %bb9, label %bb
+
+bb:                                               ; preds = %.a
+  %.i3 = call i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32> undef, i32 %.2, i32 64, i32 1)
+  %i5 = icmp eq i32 %.i3, 0
+  br label %bb9
+
+bb9:                                              ; preds = %bb, %.a
+  %.2.0.in.in = phi i1 [ %i5, %bb ], [ undef, %.a ]
+  %t = icmp sgt i32 %.2, -1050
+  %.2.0.in = and i1 %.2.0.in.in, %t
+  %.2.0 = zext i1 %.2.0.in to i32
+  %i11 = add i32 %.2, %.2.0
+  %i12 = icmp sgt i32 %.2, -1050
+  br i1 %i12, label %.a, label %.exit
+
+.exit:                                            ; preds = %bb9
+  ret i32 %.2.0
+}
+
+; Test that unused lanes in the s_and result are masked out with v_cndmask.
+
+; CHECK-LABEL: combine_sub_zext_and:
+; CHECK: s_and_b32 [[RESULT:s[0-9]+]]
+; CHECK: v_cndmask_b32_e64 [[ARG:v[0-9]+]], 0, 1, [[RESULT]]
+; CHECK: v_sub_nc_u32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[ARG]]
+
+define i32 @combine_sub_zext_and() {
+.entry:
+  br label %.a
+
+.a:                                               ; preds = %bb9, %.entry
+  %.2 = phi i32 [ 0, %.entry ], [ %i11, %bb9 ]
+  br i1 undef, label %bb9, label %bb
+
+bb:                                               ; preds = %.a
+  %.i3 = call i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32> undef, i32 %.2, i32 64, i32 1)
+  %i5 = icmp eq i32 %.i3, 0
+  br label %bb9
+
+bb9:                                              ; preds = %bb, %.a
+  %.2.0.in.in = phi i1 [ %i5, %bb ], [ undef, %.a ]
+  %t = icmp sgt i32 %.2, -1050
+  %.2.0.in = and i1 %.2.0.in.in, %t
+  %.2.0 = zext i1 %.2.0.in to i32
+  %i11 = sub i32 %.2, %.2.0
+  %i12 = icmp sgt i32 %.2, -1050
+  br i1 %i12, label %.a, label %.exit
+
+.exit:                                            ; preds = %bb9
+  ret i32 %.2.0
+}
+
+
+; Function Attrs: nounwind readonly willreturn
+declare i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32>, i32, i32, i32 immarg) #0
+
+attributes #0 = { nounwind readonly willreturn }
+