[llvm] [AggressiveInstCombine] Use APInt and avoid truncation when folding loads (PR #118525)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Dec 3 10:50:00 PST 2024
llvmbot wrote:
@llvm/pr-subscribers-llvm-transforms
Author: Antonio Frighetto (antoniofrighetto)
Changes:
A miscompilation issue has been addressed: when folding consecutive loads, the accumulated constant offset is now materialized at its full APInt width (Builder.getInt(Offset1)) rather than truncated to a 32-bit constant (Builder.getInt32(Offset1.getZExtValue())).
Fixes: https://github.com/llvm/llvm-project/issues/118467.
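
For context, a minimal standalone sketch (not part of the patch) of why the truncation miscompiles: GEP indices are sign-extended to the pointer index width, so the old Builder.getInt32(Offset1.getZExtValue()) call turned an offset above INT32_MAX, such as the 2149675576 exercised by the new test, into a negative 64-bit offset, while Builder.getInt(Offset1) preserves the full index-width value. The sketch below only models the offset with APInt and assumes a 64-bit pointer index type.

```cpp
// Standalone illustration; assumes only llvm/ADT/APInt.h and a 64-bit index type.
#include "llvm/ADT/APInt.h"

#include <cstdint>
#include <cstdio>

int main() {
  // stripAndAccumulateConstantOffsets yields the offset as an APInt with the
  // pointer index width (64 bits here, matching the new test's i64 GEPs).
  llvm::APInt Offset1(/*numBits=*/64, /*val=*/2149675576ULL);

  // Old code path: Builder.getInt32(Offset1.getZExtValue()) emitted an i32 GEP
  // index. GEP indices are sign-extended to the index width, so a value above
  // INT32_MAX flips sign once truncated to 32 bits.
  auto Truncated = static_cast<int32_t>(Offset1.getZExtValue());
  std::printf("i32 index sign-extends to %lld\n",
              static_cast<long long>(Truncated)); // -2145291720

  // New code path: Builder.getInt(Offset1) keeps the full 64-bit offset.
  std::printf("i64 index stays %llu\n",
              static_cast<unsigned long long>(Offset1.getZExtValue()));
  return 0;
}
```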
---
Full diff: https://github.com/llvm/llvm-project/pull/118525.diff
3 Files Affected:
- (modified) llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp (+1-1)
- (modified) llvm/test/Transforms/AggressiveInstCombine/AArch64/or-load.ll (+10-10)
- (modified) llvm/test/Transforms/AggressiveInstCombine/X86/or-load.ll (+42-10)
``````````diff
diff --git a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
index b5b561797f75ab..96e768f3c797e3 100644
--- a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
+++ b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
@@ -804,7 +804,7 @@ static bool foldConsecutiveLoads(Instruction &I, const DataLayout &DL,
Load1Ptr = Load1Ptr->stripAndAccumulateConstantOffsets(
DL, Offset1, /* AllowNonInbounds */ true);
Load1Ptr = Builder.CreatePtrAdd(Load1Ptr,
- Builder.getInt32(Offset1.getZExtValue()));
+ Builder.getInt(Offset1));
}
// Generate wider load.
NewLoad = Builder.CreateAlignedLoad(WiderType, Load1Ptr, LI1->getAlign(),
diff --git a/llvm/test/Transforms/AggressiveInstCombine/AArch64/or-load.ll b/llvm/test/Transforms/AggressiveInstCombine/AArch64/or-load.ll
index 1400ee7f703cab..10c4c9b0ca4c99 100644
--- a/llvm/test/Transforms/AggressiveInstCombine/AArch64/or-load.ll
+++ b/llvm/test/Transforms/AggressiveInstCombine/AArch64/or-load.ll
@@ -1121,19 +1121,19 @@ entry:
define i32 @loadCombine_4consecutive_metadata(ptr %p, ptr %pstr) {
; LE-LABEL: @loadCombine_4consecutive_metadata(
-; LE-NEXT: [[L1:%.*]] = load i32, ptr [[P:%.*]], align 1, !alias.scope !0
-; LE-NEXT: store i32 25, ptr [[PSTR:%.*]], align 4, !noalias !0
+; LE-NEXT: [[L1:%.*]] = load i32, ptr [[P:%.*]], align 1, !alias.scope [[META0:![0-9]+]]
+; LE-NEXT: store i32 25, ptr [[PSTR:%.*]], align 4, !noalias [[META0]]
; LE-NEXT: ret i32 [[L1]]
;
; BE-LABEL: @loadCombine_4consecutive_metadata(
; BE-NEXT: [[P1:%.*]] = getelementptr i8, ptr [[P:%.*]], i32 1
; BE-NEXT: [[P2:%.*]] = getelementptr i8, ptr [[P]], i32 2
; BE-NEXT: [[P3:%.*]] = getelementptr i8, ptr [[P]], i32 3
-; BE-NEXT: [[L1:%.*]] = load i8, ptr [[P]], align 1, !alias.scope !0
-; BE-NEXT: [[L2:%.*]] = load i8, ptr [[P1]], align 1, !alias.scope !0
-; BE-NEXT: [[L3:%.*]] = load i8, ptr [[P2]], align 1, !alias.scope !0
-; BE-NEXT: [[L4:%.*]] = load i8, ptr [[P3]], align 1, !alias.scope !0
-; BE-NEXT: store i32 25, ptr [[PSTR:%.*]], align 4, !noalias !0
+; BE-NEXT: [[L1:%.*]] = load i8, ptr [[P]], align 1, !alias.scope [[META0:![0-9]+]]
+; BE-NEXT: [[L2:%.*]] = load i8, ptr [[P1]], align 1, !alias.scope [[META0]]
+; BE-NEXT: [[L3:%.*]] = load i8, ptr [[P2]], align 1, !alias.scope [[META0]]
+; BE-NEXT: [[L4:%.*]] = load i8, ptr [[P3]], align 1, !alias.scope [[META0]]
+; BE-NEXT: store i32 25, ptr [[PSTR:%.*]], align 4, !noalias [[META0]]
; BE-NEXT: [[E1:%.*]] = zext i8 [[L1]] to i32
; BE-NEXT: [[E2:%.*]] = zext i8 [[L2]] to i32
; BE-NEXT: [[E3:%.*]] = zext i8 [[L3]] to i32
@@ -1869,7 +1869,7 @@ define i32 @loadCombine_4consecutive_badinsert2(ptr %p) {
define i32 @loadCombine_4consecutive_badinsert3(ptr %p) {
; LE-LABEL: @loadCombine_4consecutive_badinsert3(
-; LE-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[P:%.*]], i32 1
+; LE-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 1
; LE-NEXT: [[L1:%.*]] = load i32, ptr [[TMP1]], align 1
; LE-NEXT: ret i32 [[L1]]
;
@@ -2088,7 +2088,7 @@ define i32 @loadCombine_4consecutive_badinsert6(ptr %p) {
define void @nested_gep(ptr %p, ptr %dest) {
; LE-LABEL: @nested_gep(
-; LE-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[P:%.*]], i32 68
+; LE-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 68
; LE-NEXT: [[LD2:%.*]] = load i64, ptr [[TMP1]], align 4
; LE-NEXT: [[TRUNC:%.*]] = trunc i64 [[LD2]] to i32
; LE-NEXT: store i32 [[TRUNC]], ptr [[DEST:%.*]], align 4
@@ -2128,7 +2128,7 @@ define void @nested_gep(ptr %p, ptr %dest) {
define void @bitcast_gep(ptr %p, ptr %dest) {
; LE-LABEL: @bitcast_gep(
-; LE-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[P:%.*]], i32 68
+; LE-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 68
; LE-NEXT: [[LD2:%.*]] = load i64, ptr [[TMP1]], align 4
; LE-NEXT: [[TRUNC:%.*]] = trunc i64 [[LD2]] to i32
; LE-NEXT: store i32 [[TRUNC]], ptr [[DEST:%.*]], align 4
diff --git a/llvm/test/Transforms/AggressiveInstCombine/X86/or-load.ll b/llvm/test/Transforms/AggressiveInstCombine/X86/or-load.ll
index 0aa6f9ecdf884a..e0f80d53acb17e 100644
--- a/llvm/test/Transforms/AggressiveInstCombine/X86/or-load.ll
+++ b/llvm/test/Transforms/AggressiveInstCombine/X86/or-load.ll
@@ -1205,19 +1205,19 @@ entry:
define i32 @loadCombine_4consecutive_metadata(ptr %p, ptr %pstr) {
; LE-LABEL: @loadCombine_4consecutive_metadata(
-; LE-NEXT: [[L1:%.*]] = load i32, ptr [[P:%.*]], align 1, !alias.scope !0
-; LE-NEXT: store i32 25, ptr [[PSTR:%.*]], align 4, !noalias !0
+; LE-NEXT: [[L1:%.*]] = load i32, ptr [[P:%.*]], align 1, !alias.scope [[META0:![0-9]+]]
+; LE-NEXT: store i32 25, ptr [[PSTR:%.*]], align 4, !noalias [[META0]]
; LE-NEXT: ret i32 [[L1]]
;
; BE-LABEL: @loadCombine_4consecutive_metadata(
; BE-NEXT: [[P1:%.*]] = getelementptr i8, ptr [[P:%.*]], i32 1
; BE-NEXT: [[P2:%.*]] = getelementptr i8, ptr [[P]], i32 2
; BE-NEXT: [[P3:%.*]] = getelementptr i8, ptr [[P]], i32 3
-; BE-NEXT: [[L1:%.*]] = load i8, ptr [[P]], align 1, !alias.scope !0
-; BE-NEXT: [[L2:%.*]] = load i8, ptr [[P1]], align 1, !alias.scope !0
-; BE-NEXT: [[L3:%.*]] = load i8, ptr [[P2]], align 1, !alias.scope !0
-; BE-NEXT: [[L4:%.*]] = load i8, ptr [[P3]], align 1, !alias.scope !0
-; BE-NEXT: store i32 25, ptr [[PSTR:%.*]], align 4, !noalias !0
+; BE-NEXT: [[L1:%.*]] = load i8, ptr [[P]], align 1, !alias.scope [[META0:![0-9]+]]
+; BE-NEXT: [[L2:%.*]] = load i8, ptr [[P1]], align 1, !alias.scope [[META0]]
+; BE-NEXT: [[L3:%.*]] = load i8, ptr [[P2]], align 1, !alias.scope [[META0]]
+; BE-NEXT: [[L4:%.*]] = load i8, ptr [[P3]], align 1, !alias.scope [[META0]]
+; BE-NEXT: store i32 25, ptr [[PSTR:%.*]], align 4, !noalias [[META0]]
; BE-NEXT: [[E1:%.*]] = zext i8 [[L1]] to i32
; BE-NEXT: [[E2:%.*]] = zext i8 [[L2]] to i32
; BE-NEXT: [[E3:%.*]] = zext i8 [[L3]] to i32
@@ -2005,7 +2005,7 @@ define i32 @loadCombine_4consecutive_badinsert2(ptr %p) {
define i32 @loadCombine_4consecutive_badinsert3(ptr %p) {
; LE-LABEL: @loadCombine_4consecutive_badinsert3(
-; LE-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[P:%.*]], i32 1
+; LE-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 1
; LE-NEXT: [[L1:%.*]] = load i32, ptr [[TMP1]], align 1
; LE-NEXT: ret i32 [[L1]]
;
@@ -2306,7 +2306,7 @@ define i64 @loadCombine_nonConstShift2(ptr %arg, i8 %b) {
define void @nested_gep(ptr %p, ptr %dest) {
; LE-LABEL: @nested_gep(
-; LE-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[P:%.*]], i32 68
+; LE-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 68
; LE-NEXT: [[LD2:%.*]] = load i64, ptr [[TMP1]], align 4
; LE-NEXT: [[TRUNC:%.*]] = trunc i64 [[LD2]] to i32
; LE-NEXT: store i32 [[TRUNC]], ptr [[DEST:%.*]], align 4
@@ -2346,7 +2346,7 @@ define void @nested_gep(ptr %p, ptr %dest) {
define void @bitcast_gep(ptr %p, ptr %dest) {
; LE-LABEL: @bitcast_gep(
-; LE-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[P:%.*]], i32 68
+; LE-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 68
; LE-NEXT: [[LD2:%.*]] = load i64, ptr [[TMP1]], align 4
; LE-NEXT: [[TRUNC:%.*]] = trunc i64 [[LD2]] to i32
; LE-NEXT: store i32 [[TRUNC]], ptr [[DEST:%.*]], align 4
@@ -2382,3 +2382,35 @@ define void @bitcast_gep(ptr %p, ptr %dest) {
store i32 %trunc, ptr %dest, align 4
ret void
}
+
+define i32 @loadcombine_consecutive_idx_64(ptr noundef %data) {
+; LE-LABEL: @loadcombine_consecutive_idx_64(
+; LE-NEXT: entry:
+; LE-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[DATA:%.*]], i64 2149675576
+; LE-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 1
+; LE-NEXT: [[TMP2:%.*]] = zext i16 [[TMP1]] to i32
+; LE-NEXT: ret i32 [[TMP2]]
+;
+; BE-LABEL: @loadcombine_consecutive_idx_64(
+; BE-NEXT: entry:
+; BE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[DATA:%.*]], i64 2149675577
+; BE-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; BE-NEXT: [[CONV:%.*]] = zext i8 [[TMP0]] to i32
+; BE-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw i8, ptr [[DATA]], i64 2149675576
+; BE-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX4]], align 1
+; BE-NEXT: [[CONV5:%.*]] = zext i8 [[TMP1]] to i32
+; BE-NEXT: [[SHL:%.*]] = shl nuw nsw i32 [[CONV]], 8
+; BE-NEXT: [[OR7:%.*]] = or disjoint i32 [[SHL]], [[CONV5]]
+; BE-NEXT: ret i32 [[OR7]]
+;
+entry:
+ %arrayidx = getelementptr inbounds nuw i8, ptr %data, i64 2149675577
+ %0 = load i8, ptr %arrayidx, align 1
+ %conv = zext i8 %0 to i32
+ %arrayidx4 = getelementptr inbounds nuw i8, ptr %data, i64 2149675576
+ %1 = load i8, ptr %arrayidx4, align 1
+ %conv5 = zext i8 %1 to i32
+ %shl = shl nuw nsw i32 %conv, 8
+ %or7 = or disjoint i32 %shl, %conv5
+ ret i32 %or7
+}
``````````
https://github.com/llvm/llvm-project/pull/118525