[llvm] 8cdca96 - [GVN] Adjust metadata for coerced load CSE

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Mon Apr 17 03:52:38 PDT 2023


Author: Nikita Popov
Date: 2023-04-17T12:52:31+02:00
New Revision: 8cdca966902a5af9c28b5f6fb8602b22a07808a1

URL: https://github.com/llvm/llvm-project/commit/8cdca966902a5af9c28b5f6fb8602b22a07808a1
DIFF: https://github.com/llvm/llvm-project/commit/8cdca966902a5af9c28b5f6fb8602b22a07808a1.diff

LOG: [GVN] Adjust metadata for coerced load CSE

When reusing a load in a way that requires coercion (i.e. casts or
bit extraction) we currently fail to adjust metadata. Unfortunately,
none of our existing tooling for this is really suitable, because
combineMetadataForCSE() expects both loads to have the same type.
In this case we may work on loads of different types and a possibly
offset memory location.

As such, what this patch does is to simply drop all metadata, with
the following exceptions:

* Metadata for which violation is known to always cause UB.
* If the load has !noundef, keep all metadata, as this will turn
  poison-generating metadata into UB as well.

This fixes the miscompile that was exposed by D146629.

Differential Revision: https://reviews.llvm.org/D148129

Added: 
    

Modified: 
    llvm/lib/Transforms/Scalar/GVN.cpp
    llvm/test/Transforms/GVN/metadata.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
index 35a3aa9c21a58..291a426f20cfa 100644
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -999,6 +999,20 @@ Value *AvailableValue::MaterializeAdjustedValue(LoadInst *Load,
       Res = CoercedLoad;
     } else {
       Res = getValueForLoad(CoercedLoad, Offset, LoadTy, InsertPt, DL);
+      // We are adding a new user for this load, for which the original
+      // metadata may not hold. Additionally, the new load may have a different
+      // size and type, so their metadata cannot be combined in any
+      // straightforward way.
+      // Drop all metadata that is not known to cause immediate UB on violation,
+      // unless the load has !noundef, in which case all metadata violations
+      // will be promoted to UB.
+      // TODO: We can combine noalias/alias.scope metadata here, because it is
+      // independent of the load type.
+      if (!CoercedLoad->hasMetadata(LLVMContext::MD_noundef))
+        CoercedLoad->dropUnknownNonDebugMetadata(
+            {LLVMContext::MD_dereferenceable,
+             LLVMContext::MD_dereferenceable_or_null,
+             LLVMContext::MD_invariant_load, LLVMContext::MD_invariant_group});
       LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset
                         << "  " << *getCoercedLoadValue() << '\n'
                         << *Res << '\n'

diff --git a/llvm/test/Transforms/GVN/metadata.ll b/llvm/test/Transforms/GVN/metadata.ll
index d6331125f809b..8e6fee58adbc1 100644
--- a/llvm/test/Transforms/GVN/metadata.ll
+++ b/llvm/test/Transforms/GVN/metadata.ll
@@ -168,7 +168,7 @@ define void @load_dereferenceable_not_dominating(ptr %p) {
 define void @load_ptr_nonnull_to_i64(ptr %p) {
 ; CHECK-LABEL: define void @load_ptr_nonnull_to_i64
 ; CHECK-SAME: (ptr [[P:%.*]]) {
-; CHECK-NEXT:    [[VAL:%.*]] = load ptr, ptr [[P]], align 8, !nonnull !6
+; CHECK-NEXT:    [[VAL:%.*]] = load ptr, ptr [[P]], align 8
 ; CHECK-NEXT:    [[VAL_INT:%.*]] = ptrtoint ptr [[VAL]] to i64
 ; CHECK-NEXT:    call void @use.i64(i64 [[VAL_INT]])
 ; CHECK-NEXT:    call void @use.i64(i64 [[VAL_INT]])
@@ -253,7 +253,7 @@ define void @load_ptr_dereferenceable_or_null_to_i64(ptr %p) {
 define void @load_ptr_nonnull_to_i32(ptr %p) {
 ; CHECK-LABEL: define void @load_ptr_nonnull_to_i32
 ; CHECK-SAME: (ptr [[P:%.*]]) {
-; CHECK-NEXT:    [[VAL:%.*]] = load ptr, ptr [[P]], align 8, !nonnull !6
+; CHECK-NEXT:    [[VAL:%.*]] = load ptr, ptr [[P]], align 8
 ; CHECK-NEXT:    [[VAL_INT:%.*]] = ptrtoint ptr [[VAL]] to i64
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[VAL_INT]] to i32
 ; CHECK-NEXT:    call void @use.i64(i64 [[VAL_INT]])
@@ -271,7 +271,7 @@ define void @load_ptr_nonnull_to_i32(ptr %p) {
 define void @load_i64_range_to_i32_range(ptr %p) {
 ; CHECK-LABEL: define void @load_i64_range_to_i32_range
 ; CHECK-SAME: (ptr [[P:%.*]]) {
-; CHECK-NEXT:    [[VAL:%.*]] = load i64, ptr [[P]], align 8, !range [[RNG8:![0-9]+]]
+; CHECK-NEXT:    [[VAL:%.*]] = load i64, ptr [[P]], align 8
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[VAL]] to i32
 ; CHECK-NEXT:    call void @use.i64(i64 [[VAL]])
 ; CHECK-NEXT:    call void @use.i32(i32 [[TMP1]])
@@ -305,5 +305,4 @@ define void @load_i64_range_to_i32_range(ptr %p) {
 ; CHECK: [[RNG5]] = !{i32 3, i32 4, i32 5, i32 2}
 ; CHECK: [[META6:![0-9]+]] = !{}
 ; CHECK: [[META7:![0-9]+]] = !{i64 10}
-; CHECK: [[RNG8]] = !{i64 0, i64 10}
 ;.


        


More information about the llvm-commits mailing list