[llvm-branch-commits] [llvm] release/18.x: [DAGCombiner] Fix miscompile bug in combineShiftOfShiftedLogic (#89616) (PR #89766)

Tom Stellard via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Thu Apr 25 09:54:20 PDT 2024


Björn Pettersson <bjorn.a.pettersson at ericsson.com>
In-Reply-To: <llvm.org/llvm/llvm-project/pull/89766 at github.com>


https://github.com/tstellar updated https://github.com/llvm/llvm-project/pull/89766

From 1aa91720cc4fa1511c0851269babb40a05401193 Mon Sep 17 00:00:00 2001
From: Bjorn Pettersson <bjorn.a.pettersson at ericsson.com>
Date: Mon, 22 Apr 2024 17:34:48 +0200
Subject: [PATCH 1/2] [DAGCombiner] Pre-commit test case for miscompile bug in
 combineShiftOfShiftedLogic

DAGCombiner tries to fold shl over binops, and in the process it may
combine the shl with another shl. However, it needs to be careful to
ensure that the sum of the shift counts fits in the type used for the
shift amount.
For example, X86 uses i8 as the shift amount type, so we need to make
sure that the sum of the shift amounts isn't greater than 255.
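
To illustrate the wraparound: the two shift amounts used in the test
below, 130 and 160, sum to 290, which does not fit in i8 and wraps to 34.
That matches the bogus shldq $34 / shlq $34 seen in the X64 output of the
pre-commit test. A minimal standalone C++ sketch (not part of the patch):

    // Standalone sketch: an i8 shift amount cannot hold 130 + 160; the
    // sum wraps modulo 256 to 34.
    #include <cassert>
    #include <cstdint>

    int main() {
      uint8_t inner = 130;                  // inner shl amount
      uint8_t outer = 160;                  // outer shl amount
      uint8_t sum = uint8_t(inner + outer); // 290 truncated to 8 bits
      assert(sum == 34);                    // not the intended 290
      return 0;
    }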

The fix will be applied in a later commit. This only pre-commits the
test case to show that we currently get the wrong result.

The bug was found when testing the C23 BitInt feature.

(cherry picked from commit 5fd9bbdea6cc248469d5465de44e747378ffafcb)
---
 llvm/test/CodeGen/X86/shift-combine.ll | 65 ++++++++++++++++++++++++++
 1 file changed, 65 insertions(+)

diff --git a/llvm/test/CodeGen/X86/shift-combine.ll b/llvm/test/CodeGen/X86/shift-combine.ll
index cf45641fba6321..f5bf3de9114dc5 100644
--- a/llvm/test/CodeGen/X86/shift-combine.ll
+++ b/llvm/test/CodeGen/X86/shift-combine.ll
@@ -787,3 +787,68 @@ define <4 x i32> @or_tree_with_mismatching_shifts_vec_i32(<4 x i32> %a, <4 x i32
   %r = or <4 x i32> %or.ab, %or.cd
   ret <4 x i32> %r
 }
+
+; FIXME: Reproducer for a DAGCombiner::combineShiftOfShiftedLogic
+; bug. DAGCombiner needs to check that the sum of the shift amounts fits in
+; i8, which is the legal type used to describe X86 shift amounts. Verify that
+; we do not try to create a shift with 130+160 as the shift amount, and verify
+; that the stored value does not depend on %a1.
+define void @combineShiftOfShiftedLogic(i128 %a1, i32 %a2, ptr %p) {
+; X86-LABEL: combineShiftOfShiftedLogic:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    .cfi_def_cfa_offset 12
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    .cfi_def_cfa_offset 16
+; X86-NEXT:    .cfi_offset %esi, -16
+; X86-NEXT:    .cfi_offset %edi, -12
+; X86-NEXT:    .cfi_offset %ebx, -8
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl %edi, %esi
+; X86-NEXT:    shldl $2, %ebx, %edi
+; X86-NEXT:    shldl $2, %edx, %ebx
+; X86-NEXT:    shrl $30, %esi
+; X86-NEXT:    orl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    shldl $2, %ecx, %edx
+; X86-NEXT:    shll $2, %ecx
+; X86-NEXT:    movl %edi, 16(%eax)
+; X86-NEXT:    movl %ebx, 12(%eax)
+; X86-NEXT:    movl %edx, 8(%eax)
+; X86-NEXT:    movl %ecx, 4(%eax)
+; X86-NEXT:    movl %esi, 20(%eax)
+; X86-NEXT:    movl $0, (%eax)
+; X86-NEXT:    popl %esi
+; X86-NEXT:    .cfi_def_cfa_offset 12
+; X86-NEXT:    popl %edi
+; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    .cfi_def_cfa_offset 4
+; X86-NEXT:    retl
+;
+; X64-LABEL: combineShiftOfShiftedLogic:
+; X64:       # %bb.0:
+; X64-NEXT:    # kill: def $edx killed $edx def $rdx
+; X64-NEXT:    shlq $32, %rdx
+; X64-NEXT:    movq %rsi, %rax
+; X64-NEXT:    shrq $30, %rax
+; X64-NEXT:    orq %rdx, %rax
+; X64-NEXT:    shldq $34, %rdi, %rsi
+; X64-NEXT:    shlq $34, %rdi
+; X64-NEXT:    movq %rsi, 8(%rcx)
+; X64-NEXT:    movq %rdi, (%rcx)
+; X64-NEXT:    movq %rax, 16(%rcx)
+; X64-NEXT:    retq
+  %zext1 = zext i128 %a1 to i192
+  %zext2 = zext i32 %a2 to i192
+  %shl = shl i192 %zext1, 130
+  %or = or i192 %shl, %zext2
+  %res = shl i192 %or, 160
+  store i192 %res, ptr %p, align 8
+  ret void
+}

From 78b99c73ee4b96fe9ce0e294d4632326afb2db42 Mon Sep 17 00:00:00 2001
From: Björn Pettersson <bjorn.a.pettersson at ericsson.com>
Date: Tue, 23 Apr 2024 14:11:34 +0200
Subject: [PATCH 2/2] [DAGCombiner] Fix miscompile bug in
 combineShiftOfShiftedLogic (#89616)

Ensure that the sum of the shift amounts does not overflow the
shift amount type when combining shifts in combineShiftOfShiftedLogic.

Solves a miscompile bug found when testing the C23 BitInt feature.

Targets like X86, which only use i8 for shift amounts after
legalization, seem to be extra susceptible to bugs like this, as it
isn't legal to shift by more than 255 steps.
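
For reference, a minimal standalone sketch (not part of the patch, and
assuming LLVM's ADT headers are available) of how APInt::uadd_ov reports
the overflow that the added check relies on:

    #include "llvm/ADT/APInt.h"
    #include <cstdio>

    int main() {
      // Shift amounts as they appear after legalization on X86 (i8).
      llvm::APInt C1Val(8, 130);
      llvm::APInt ShiftAmtVal(8, 160);
      bool Overflow = false;
      llvm::APInt NewShiftAmt = C1Val.uadd_ov(ShiftAmtVal, Overflow);
      // Overflow is true here, so the combine must be rejected; otherwise
      // the wrapped sum of 34 would be used instead of 290.
      std::printf("sum=%llu overflow=%d\n",
                  (unsigned long long)NewShiftAmt.getZExtValue(), Overflow);
      return 0;
    }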

(cherry picked from commit f9b419b7a038dcd51a7943b160acc867714c595f)
---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp |  9 ++-
 llvm/test/CodeGen/X86/shift-combine.ll        | 58 +++++--------------
 2 files changed, 22 insertions(+), 45 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index e806e0f0731f23..5038f8a1fc1562 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -9636,8 +9636,15 @@ static SDValue combineShiftOfShiftedLogic(SDNode *Shift, SelectionDAG &DAG) {
     if (ShiftAmtVal->getBitWidth() != C1Val.getBitWidth())
       return false;
 
+    // The fold is not valid if the sum of the shift values doesn't fit in the
+    // given shift amount type.
+    bool Overflow = false;
+    APInt NewShiftAmt = C1Val.uadd_ov(*ShiftAmtVal, Overflow);
+    if (Overflow)
+      return false;
+
     // The fold is not valid if the sum of the shift values exceeds bitwidth.
-    if ((*ShiftAmtVal + C1Val).uge(V.getScalarValueSizeInBits()))
+    if (NewShiftAmt.uge(V.getScalarValueSizeInBits()))
       return false;
 
     return true;
diff --git a/llvm/test/CodeGen/X86/shift-combine.ll b/llvm/test/CodeGen/X86/shift-combine.ll
index f5bf3de9114dc5..3316a332fafdff 100644
--- a/llvm/test/CodeGen/X86/shift-combine.ll
+++ b/llvm/test/CodeGen/X86/shift-combine.ll
@@ -788,61 +788,31 @@ define <4 x i32> @or_tree_with_mismatching_shifts_vec_i32(<4 x i32> %a, <4 x i32
   ret <4 x i32> %r
 }
 
-; FIXME: Reproducer for a DAGCombiner::combineShiftOfShiftedLogic
-; bug. DAGCombiner needs to check that the sum of the shift amounts fits in
-; i8, which is the legal type used to describe X86 shift amounts. Verify that
-; we do not try to create a shift with 130+160 as the shift amount, and verify
-; that the stored value does not depend on %a1.
+; Reproducer for a DAGCombiner::combineShiftOfShiftedLogic bug. DAGCombiner
+; needs to check that the sum of the shift amounts fits in i8, which is the
+; legal type used to describe X86 shift amounts. Verify that we do not try to
+; create a shift with 130+160 as the shift amount, and verify that the stored
+; value does not depend on %a1.
 define void @combineShiftOfShiftedLogic(i128 %a1, i32 %a2, ptr %p) {
 ; X86-LABEL: combineShiftOfShiftedLogic:
 ; X86:       # %bb.0:
-; X86-NEXT:    pushl %ebx
-; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    pushl %edi
-; X86-NEXT:    .cfi_def_cfa_offset 12
-; X86-NEXT:    pushl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 16
-; X86-NEXT:    .cfi_offset %esi, -16
-; X86-NEXT:    .cfi_offset %edi, -12
-; X86-NEXT:    .cfi_offset %ebx, -8
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT:    movl %edi, %esi
-; X86-NEXT:    shldl $2, %ebx, %edi
-; X86-NEXT:    shldl $2, %edx, %ebx
-; X86-NEXT:    shrl $30, %esi
-; X86-NEXT:    orl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    shldl $2, %ecx, %edx
-; X86-NEXT:    shll $2, %ecx
-; X86-NEXT:    movl %edi, 16(%eax)
-; X86-NEXT:    movl %ebx, 12(%eax)
-; X86-NEXT:    movl %edx, 8(%eax)
-; X86-NEXT:    movl %ecx, 4(%eax)
-; X86-NEXT:    movl %esi, 20(%eax)
-; X86-NEXT:    movl $0, (%eax)
-; X86-NEXT:    popl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 12
-; X86-NEXT:    popl %edi
-; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    popl %ebx
-; X86-NEXT:    .cfi_def_cfa_offset 4
+; X86-NEXT:    movl %eax, 20(%ecx)
+; X86-NEXT:    movl $0, 16(%ecx)
+; X86-NEXT:    movl $0, 12(%ecx)
+; X86-NEXT:    movl $0, 8(%ecx)
+; X86-NEXT:    movl $0, 4(%ecx)
+; X86-NEXT:    movl $0, (%ecx)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: combineShiftOfShiftedLogic:
 ; X64:       # %bb.0:
 ; X64-NEXT:    # kill: def $edx killed $edx def $rdx
 ; X64-NEXT:    shlq $32, %rdx
-; X64-NEXT:    movq %rsi, %rax
-; X64-NEXT:    shrq $30, %rax
-; X64-NEXT:    orq %rdx, %rax
-; X64-NEXT:    shldq $34, %rdi, %rsi
-; X64-NEXT:    shlq $34, %rdi
-; X64-NEXT:    movq %rsi, 8(%rcx)
-; X64-NEXT:    movq %rdi, (%rcx)
-; X64-NEXT:    movq %rax, 16(%rcx)
+; X64-NEXT:    movq %rdx, 16(%rcx)
+; X64-NEXT:    movq $0, 8(%rcx)
+; X64-NEXT:    movq $0, (%rcx)
 ; X64-NEXT:    retq
   %zext1 = zext i128 %a1 to i192
   %zext2 = zext i32 %a2 to i192



More information about the llvm-branch-commits mailing list