[llvm] 9deee6b - [SDAG] Don't transfer !range metadata without !noundef to SDAG (PR64589)

Nikita Popov via llvm-commits llvm-commits@lists.llvm.org
Mon Aug 14 00:04:36 PDT 2023


Author: Nikita Popov
Date: 2023-08-14T09:04:27+02:00
New Revision: 9deee6bffa9c331f46c68e5dd4cb4abf93dc0716

URL: https://github.com/llvm/llvm-project/commit/9deee6bffa9c331f46c68e5dd4cb4abf93dc0716
DIFF: https://github.com/llvm/llvm-project/commit/9deee6bffa9c331f46c68e5dd4cb4abf93dc0716.diff

LOG: [SDAG] Don't transfer !range metadata without !noundef to SDAG (PR64589)

D141386 changed the semantics of !range metadata to return poison
on violation. If !range is combined with !noundef, violation is
immediate UB instead, matching the old semantics.

In theory, these IR semantics should also carry over into SDAG.
In practice, DAGCombine has at least one key transform that is
invalid in the presence of poison, namely the conversion of logical
and/or to bitwise and/or (https://github.com/llvm/llvm-project/blob/c7b537bf0923df05254f9fa4722b298eb8f4790d/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp#L11252).
Ideally, we would fix this transform, but this will require
substantial work to avoid codegen regressions.

In the meantime, avoid transferring !range metadata without
!noundef, effectively restoring the old !range metadata semantics
on the SDAG layer.

Fixes https://github.com/llvm/llvm-project/issues/64589.

Differential Revision: https://reviews.llvm.org/D157685

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
    llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
    llvm/test/CodeGen/AArch64/lower-range-metadata-func-call.ll
    llvm/test/CodeGen/AMDGPU/array-ptr-calc-i32.ll
    llvm/test/CodeGen/AMDGPU/global-saddr-load.ll
    llvm/test/CodeGen/AMDGPU/load-range-metadata-assert.ll
    llvm/test/CodeGen/AMDGPU/load-range-metadata-sign-bits.ll
    llvm/test/CodeGen/PowerPC/BreakableToken-reduced.ll
    llvm/test/CodeGen/PowerPC/global-address-non-got-indirect-access.ll
    llvm/test/CodeGen/X86/legalize-vec-assertzext.ll
    llvm/test/CodeGen/X86/pr12360.ll
    llvm/test/CodeGen/X86/pr48458.ll
    llvm/test/CodeGen/X86/pr48888.ll
    llvm/test/CodeGen/X86/pr64589.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 80f5ecf7628e1c..5dd1f43888eb8f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4156,6 +4156,18 @@ void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
   assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
 }
 
+static const MDNode *getRangeMetadata(const Instruction &I) {
+  // If !noundef is not present, then !range violation results in a poison
+  // value rather than immediate undefined behavior. In theory, transferring
+  // these annotations to SDAG is fine, but in practice there are key SDAG
+  // transforms that are known not to be poison-safe, such as folding logical
+  // and/or to bitwise and/or. For now, only transfer !range if !noundef is
+  // also present.
+  if (!I.hasMetadata(LLVMContext::MD_noundef))
+    return nullptr;
+  return I.getMetadata(LLVMContext::MD_range);
+}
+
 void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
   if (I.isAtomic())
     return visitAtomicLoad(I);
@@ -4188,7 +4200,7 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
 
   Align Alignment = I.getAlign();
   AAMDNodes AAInfo = I.getAAMetadata();
-  const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
+  const MDNode *Ranges = getRangeMetadata(I);
   bool isVolatile = I.isVolatile();
   MachineMemOperand::Flags MMOFlags =
       TLI.getLoadMemOperandFlags(I, DAG.getDataLayout(), AC, LibInfo);
@@ -4593,7 +4605,7 @@ void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
     Alignment = DAG.getEVTAlign(VT);
 
   AAMDNodes AAInfo = I.getAAMetadata();
-  const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
+  const MDNode *Ranges = getRangeMetadata(I);
 
   // Do not serialize masked loads of constant memory with anything.
   MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
@@ -4627,7 +4639,7 @@ void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
                         ->getMaybeAlignValue()
                         .value_or(DAG.getEVTAlign(VT.getScalarType()));
 
-  const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
+  const MDNode *Ranges = getRangeMetadata(I);
 
   SDValue Root = DAG.getRoot();
   SDValue Base;
@@ -7632,7 +7644,7 @@ void SelectionDAGBuilder::visitVPLoad(
   Value *PtrOperand = VPIntrin.getArgOperand(0);
   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
-  const MDNode *Ranges = VPIntrin.getMetadata(LLVMContext::MD_range);
+  const MDNode *Ranges = getRangeMetadata(VPIntrin);
   SDValue LD;
   // Do not serialize variable-length loads of constant memory with
   // anything.
@@ -7659,7 +7671,7 @@ void SelectionDAGBuilder::visitVPGather(
   Value *PtrOperand = VPIntrin.getArgOperand(0);
   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
-  const MDNode *Ranges = VPIntrin.getMetadata(LLVMContext::MD_range);
+  const MDNode *Ranges = getRangeMetadata(VPIntrin);
   SDValue LD;
   if (!Alignment)
     Alignment = DAG.getEVTAlign(VT.getScalarType());
@@ -7766,7 +7778,7 @@ void SelectionDAGBuilder::visitVPStridedLoad(
   if (!Alignment)
     Alignment = DAG.getEVTAlign(VT.getScalarType());
   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
-  const MDNode *Ranges = VPIntrin.getMetadata(LLVMContext::MD_range);
+  const MDNode *Ranges = getRangeMetadata(VPIntrin);
   MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
   bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
@@ -9613,7 +9625,7 @@ void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
 SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
                                                     const Instruction &I,
                                                     SDValue Op) {
-  const MDNode *Range = I.getMetadata(LLVMContext::MD_range);
+  const MDNode *Range = getRangeMetadata(I);
   if (!Range)
     return Op;
 

diff  --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
index c2d7d605fbc224..444ee2efb6d227 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -205,6 +205,8 @@ ARMTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
           ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0)),
           ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0x10000))};
       II.setMetadata(LLVMContext::MD_range, MDNode::get(II.getContext(), M));
+      II.setMetadata(LLVMContext::MD_noundef,
+                     MDNode::get(II.getContext(), std::nullopt));
      return &II;
     }
     break;

diff  --git a/llvm/test/CodeGen/AArch64/lower-range-metadata-func-call.ll b/llvm/test/CodeGen/AArch64/lower-range-metadata-func-call.ll
index 4075db10c42b4f..5b9a734c49825a 100644
--- a/llvm/test/CodeGen/AArch64/lower-range-metadata-func-call.ll
+++ b/llvm/test/CodeGen/AArch64/lower-range-metadata-func-call.ll
@@ -7,7 +7,7 @@
 ; CHECK: ret
 define i32 @test_call_known_max_range() #0 {
 entry:
-  %id = tail call i32 @foo(), !range !0
+  %id = tail call i32 @foo(), !range !0, !noundef !{}
   %and = and i32 %id, 1023
   ret i32 %and
 }
@@ -18,7 +18,7 @@ entry:
 ; CHECK: ret
 define i32 @test_call_known_trunc_1_bit_range() #0 {
 entry:
-  %id = tail call i32 @foo(), !range !0
+  %id = tail call i32 @foo(), !range !0, !noundef !{}
   %and = and i32 %id, 511
   ret i32 %and
 }
@@ -29,7 +29,7 @@ entry:
 ; CHECK: ret
 define i32 @test_call_known_max_range_m1() #0 {
 entry:
-  %id = tail call i32 @foo(), !range !1
+  %id = tail call i32 @foo(), !range !1, !noundef !{}
   %and = and i32 %id, 255
   ret i32 %and
 }

diff  --git a/llvm/test/CodeGen/AMDGPU/array-ptr-calc-i32.ll b/llvm/test/CodeGen/AMDGPU/array-ptr-calc-i32.ll
index 14b0e0ff47d41a..55b6fee09022c3 100644
--- a/llvm/test/CodeGen/AMDGPU/array-ptr-calc-i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/array-ptr-calc-i32.ll
@@ -30,14 +30,14 @@ define amdgpu_kernel void @test_private_array_ptr_calc(ptr addrspace(1) noalias
   %tid = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %mbcnt.lo)
   %a_ptr = getelementptr inbounds i32, ptr addrspace(1) %inA, i32 %tid
   %b_ptr = getelementptr inbounds i32, ptr addrspace(1) %inB, i32 %tid
-  %a = load i32, ptr addrspace(1) %a_ptr, !range !0
-  %b = load i32, ptr addrspace(1) %b_ptr, !range !0
+  %a = load i32, ptr addrspace(1) %a_ptr, !range !0, !noundef !{}
+  %b = load i32, ptr addrspace(1) %b_ptr, !range !0, !noundef !{}
   %result = add i32 %a, %b
   %alloca_ptr = getelementptr inbounds [16 x i32], ptr addrspace(5) %alloca, i32 1, i32 %b
   store i32 %result, ptr addrspace(5) %alloca_ptr, align 4
   ; Dummy call
   call void @llvm.amdgcn.s.barrier()
-  %reload = load i32, ptr addrspace(5) %alloca_ptr, align 4, !range !0
+  %reload = load i32, ptr addrspace(5) %alloca_ptr, align 4, !range !0, !noundef !{}
   %out_ptr = getelementptr inbounds i32, ptr addrspace(1) %out, i32 %tid
   store i32 %reload, ptr addrspace(1) %out_ptr, align 4
   ret void

diff  --git a/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll b/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll
index c59349d718a6ef..2f70ca5e521744 100644
--- a/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll
@@ -1396,7 +1396,7 @@ define amdgpu_ps float @global_load_f32_saddr_zext_vgpr_range(ptr addrspace(1) i
 ; GFX11-NEXT:    global_load_b32 v0, v0, s[2:3]
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    ; return to shader part epilog
-  %voffset = load i32, ptr addrspace(1) %voffset.ptr, !range !0
+  %voffset = load i32, ptr addrspace(1) %voffset.ptr, !range !0, !noundef !{}
   %zext.offset = zext i32 %voffset to i64
   %gep = getelementptr inbounds float, ptr addrspace(1) %sbase, i64 %zext.offset
   %load = load float, ptr addrspace(1) %gep
@@ -1422,7 +1422,7 @@ define amdgpu_ps float @global_load_f32_saddr_zext_vgpr_range_imm_offset(ptr add
 ; GFX11-NEXT:    global_load_b32 v0, v0, s[2:3] offset:400
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    ; return to shader part epilog
-  %voffset = load i32, ptr addrspace(1) %voffset.ptr, !range !0
+  %voffset = load i32, ptr addrspace(1) %voffset.ptr, !range !0, !noundef !{}
   %zext.offset = zext i32 %voffset to i64
   %gep0 = getelementptr inbounds float, ptr addrspace(1) %sbase, i64 %zext.offset
   %gep1 = getelementptr inbounds float, ptr addrspace(1) %gep0, i64 100
@@ -1470,7 +1470,7 @@ define amdgpu_ps float @global_load_f32_saddr_zext_vgpr_range_too_large(ptr addr
 ; GFX11-NEXT:    global_load_b32 v0, v[0:1], off
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    ; return to shader part epilog
-  %voffset = load i32, ptr addrspace(1) %voffset.ptr, !range !1
+  %voffset = load i32, ptr addrspace(1) %voffset.ptr, !range !1, !noundef !{}
   %zext.offset = zext i32 %voffset to i64
   %gep = getelementptr inbounds float, ptr addrspace(1) %sbase, i64 %zext.offset
   %load = load float, ptr addrspace(1) %gep

diff  --git a/llvm/test/CodeGen/AMDGPU/load-range-metadata-assert.ll b/llvm/test/CodeGen/AMDGPU/load-range-metadata-assert.ll
index 76b95ed9b634a9..209f9516ba29a2 100644
--- a/llvm/test/CodeGen/AMDGPU/load-range-metadata-assert.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-range-metadata-assert.ll
@@ -8,7 +8,7 @@ define <2 x i32> @range_metata_sext_range_0_i24_i64_bitcast(ptr addrspace(1) %pt
 ; GCN-NEXT:    global_load_dwordx2 v[0:1], v[0:1], off glc
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
-  %val = load volatile i64, ptr addrspace(1) %ptr, align 4, !range !0
+  %val = load volatile i64, ptr addrspace(1) %ptr, align 4, !range !0, !noundef !{}
   %shl = shl i64 %val, 22
   %ashr = ashr i64 %shl, 22
   %cast = bitcast i64 %ashr to <2 x i32>

diff  --git a/llvm/test/CodeGen/AMDGPU/load-range-metadata-sign-bits.ll b/llvm/test/CodeGen/AMDGPU/load-range-metadata-sign-bits.ll
index 6d006ab40c6ab2..a4cf4d6ed2c8ec 100644
--- a/llvm/test/CodeGen/AMDGPU/load-range-metadata-sign-bits.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-range-metadata-sign-bits.ll
@@ -8,7 +8,7 @@ define i32 @range_metadata_sext_i8_signed_range_i32(ptr addrspace(1) %ptr) {
 ; GCN-NEXT:    global_load_dword v0, v[0:1], off glc
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
-  %val = load volatile i32, ptr addrspace(1) %ptr, align 4, !range !0 ; [-127, 128)
+  %val = load volatile i32, ptr addrspace(1) %ptr, align 4, !range !0, !noundef !{} ; [-127, 128)
   %shl = shl i32 %val, 24
   %ashr = ashr i32 %shl, 24
   ret i32 %ashr
@@ -22,7 +22,7 @@ define i32 @range_metadata_sext_upper_range_limited_i32(ptr addrspace(1) %ptr) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 8
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
-  %val = load volatile i32, ptr addrspace(1) %ptr, align 4, !range !1 ; [-127, 256)
+  %val = load volatile i32, ptr addrspace(1) %ptr, align 4, !range !1, !noundef !{} ; [-127, 256)
   %shl = shl i32 %val, 24
   %ashr = ashr i32 %shl, 24
   ret i32 %ashr
@@ -50,7 +50,7 @@ define i32 @range_metadata_sext_i8_neg_neg_range_i32(ptr addrspace(1) %ptr) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v0, 63, v0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
-  %val = load volatile i32, ptr addrspace(1) %ptr, align 4, !range !3
+  %val = load volatile i32, ptr addrspace(1) %ptr, align 4, !range !3, !noundef !{}
   %shl = shl i32 %val, 25
   %ashr = ashr i32 %shl, 25
   ret i32 %ashr
@@ -63,7 +63,7 @@ define i32 @range_metadata_sextload_i8_signed_range_i4_i32(ptr addrspace(1) %ptr
 ; GCN-NEXT:    global_load_sbyte v0, v[0:1], off glc
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
-  %load = load volatile i8, ptr addrspace(1) %ptr, align 1, !range !4
+  %load = load volatile i8, ptr addrspace(1) %ptr, align 1, !range !4, !noundef !{}
   %shl = shl i8 %load, 3
   %ashr = ashr i8 %shl, 3
   %ext = sext i8 %ashr to i32
@@ -78,7 +78,7 @@ define i25 @range_metadata_sext_i8_signed_range_i25(ptr addrspace(1) %ptr) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 2
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
-  %val = load volatile i25, ptr addrspace(1) %ptr, align 4, !range !5
+  %val = load volatile i25, ptr addrspace(1) %ptr, align 4, !range !5, !noundef !{}
   %shl = shl i25 %val, 23
   %ashr = ashr i25 %shl, 23
   ret i25 %ashr
@@ -91,7 +91,7 @@ define i32 @range_metadata_i32_neg1_to_1(ptr addrspace(1) %ptr) {
 ; GCN-NEXT:    global_load_dword v0, v[0:1], off glc
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
-  %val = load volatile i32, ptr addrspace(1) %ptr, align 4, !range !6
+  %val = load volatile i32, ptr addrspace(1) %ptr, align 4, !range !6, !noundef !{}
   %shl = shl i32 %val, 31
   %ashr = ashr i32 %shl, 31
   ret i32 %ashr
@@ -106,7 +106,7 @@ define i64 @range_metadata_sext_i8_signed_range_i64(ptr addrspace(1) %ptr) {
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 23, v0
 ; GCN-NEXT:    v_ashrrev_i64 v[0:1], 55, v[0:1]
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
-  %val = load volatile i64, ptr addrspace(1) %ptr, align 4, !range !7
+  %val = load volatile i64, ptr addrspace(1) %ptr, align 4, !range !7, !noundef !{}
   %shl = shl i64 %val, 55
   %ashr = ashr i64 %shl, 55
   ret i64 %ashr
@@ -119,7 +119,7 @@ define i64 @range_metadata_sext_i32_signed_range_i64(ptr addrspace(1) %ptr) {
 ; GCN-NEXT:    global_load_dwordx2 v[0:1], v[0:1], off glc
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
-  %val = load volatile i64, ptr addrspace(1) %ptr, align 4, !range !7
+  %val = load volatile i64, ptr addrspace(1) %ptr, align 4, !range !7, !noundef !{}
   %shl = shl i64 %val, 31
   %ashr = ashr i64 %shl, 31
   ret i64 %ashr
@@ -132,7 +132,7 @@ define i64 @range_metadata_sext_i33_signed_range_i64(ptr addrspace(1) %ptr) {
 ; GCN-NEXT:    global_load_dwordx2 v[0:1], v[0:1], off glc
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
-  %val = load volatile i64, ptr addrspace(1) %ptr, align 4, !range !8
+  %val = load volatile i64, ptr addrspace(1) %ptr, align 4, !range !8, !noundef !{}
   %shl = shl i64 %val, 30
   %ashr = ashr i64 %shl, 30
   ret i64 %ashr

diff  --git a/llvm/test/CodeGen/PowerPC/BreakableToken-reduced.ll b/llvm/test/CodeGen/PowerPC/BreakableToken-reduced.ll
index ece0fbd73df9ff..5302257076071e 100644
--- a/llvm/test/CodeGen/PowerPC/BreakableToken-reduced.ll
+++ b/llvm/test/CodeGen/PowerPC/BreakableToken-reduced.ll
@@ -295,7 +295,7 @@ _ZNK4llvm9StringRef10startswithES0_.exit:         ; preds = %entry._ZNK4llvm9Str
   %agg.tmp6.sroa.2.0..sroa_idx14 = getelementptr inbounds %"class.clang::format::BreakableStringLiteral", ptr %this, i64 0, i32 0, i32 3, i32 1
   %agg.tmp6.sroa.2.0.copyload = load i64, ptr %agg.tmp6.sroa.2.0..sroa_idx14, align 8
   %InPPDirective = getelementptr inbounds %"class.clang::format::BreakableStringLiteral", ptr %this, i64 0, i32 0, i32 0, i32 3
-  %5 = load i8, ptr %InPPDirective, align 4, !tbaa !34, !range !39
+  %5 = load i8, ptr %InPPDirective, align 4, !tbaa !34, !range !39, !noundef !{}
   %tobool = icmp ne i8 %5, 0
   %IndentLevel = getelementptr inbounds %"class.clang::format::BreakableStringLiteral", ptr %this, i64 0, i32 0, i32 0, i32 2
   %6 = load i32, ptr %IndentLevel, align 8, !tbaa !33

diff  --git a/llvm/test/CodeGen/PowerPC/global-address-non-got-indirect-access.ll b/llvm/test/CodeGen/PowerPC/global-address-non-got-indirect-access.ll
index 8eb1765507cbc1..14fea561831985 100644
--- a/llvm/test/CodeGen/PowerPC/global-address-non-got-indirect-access.ll
+++ b/llvm/test/CodeGen/PowerPC/global-address-non-got-indirect-access.ll
@@ -33,7 +33,7 @@
; CHECK-NEXT:    plbz r3, _ZL13StaticBoolVar@PCREL(0), 1
 ; CHECK-NEXT:    blr
 entry:
-  %0 = load i8, ptr @_ZL13StaticBoolVar, align 1, !range !0
+  %0 = load i8, ptr @_ZL13StaticBoolVar, align 1, !range !0, !noundef !{}
   %tobool = icmp ne i8 %0, 0
   ret i1 %tobool
 }

diff  --git a/llvm/test/CodeGen/X86/legalize-vec-assertzext.ll b/llvm/test/CodeGen/X86/legalize-vec-assertzext.ll
index e8aa5778bc6303..1c595b7fb5e1e3 100644
--- a/llvm/test/CodeGen/X86/legalize-vec-assertzext.ll
+++ b/llvm/test/CodeGen/X86/legalize-vec-assertzext.ll
@@ -11,7 +11,7 @@ define i64 @split_assertzext(ptr %x) nounwind {
 ; CHECK-NEXT:    popq %rcx
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %e = call <16 x i64> @test(), !range !0
+  %e = call <16 x i64> @test(), !range !0, !noundef !{}
   %d = extractelement <16 x i64> %e, i32 15
   ret i64 %d
 }
@@ -29,7 +29,7 @@ define i64 @widen_assertzext(ptr %x) nounwind {
 ; CHECK-NEXT:    popq %rcx
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %e = call <7 x i64> @test2(), !range !0
+  %e = call <7 x i64> @test2(), !range !0, !noundef !{}
   %d = extractelement <7 x i64> %e, i32 6
   ret i64 %d
 }

diff  --git a/llvm/test/CodeGen/X86/pr12360.ll b/llvm/test/CodeGen/X86/pr12360.ll
index e0defce3acafa4..1ef11ebc9794e8 100644
--- a/llvm/test/CodeGen/X86/pr12360.ll
+++ b/llvm/test/CodeGen/X86/pr12360.ll
@@ -8,7 +8,7 @@ define zeroext i1 @f1(ptr %x) {
 ; CHECK-NEXT:    retq
 
 entry:
-  %0 = load i8, ptr %x, align 1, !range !0
+  %0 = load i8, ptr %x, align 1, !range !0, !noundef !{}
   %tobool = trunc i8 %0 to i1
   ret i1 %tobool
 }
@@ -20,7 +20,7 @@ define zeroext i1 @f2(ptr %x) {
 ; CHECK-NEXT:    retq
 
 entry:
-  %0 = load i8, ptr %x, align 1, !range !0
+  %0 = load i8, ptr %x, align 1, !range !0, !noundef !{}
   %tobool = icmp ne i8 %0, 0
   ret i1 %tobool
 }

diff  --git a/llvm/test/CodeGen/X86/pr48458.ll b/llvm/test/CodeGen/X86/pr48458.ll
index b157a9f2cc4f65..76060d5c62994f 100644
--- a/llvm/test/CodeGen/X86/pr48458.ll
+++ b/llvm/test/CodeGen/X86/pr48458.ll
@@ -9,7 +9,7 @@ define i1 @foo(ptr %0) {
 ; CHECK-NEXT:    sete %al
 ; CHECK-NEXT:    retq
 top:
-  %1 = load i64, ptr %0, !range !0
+  %1 = load i64, ptr %0, !range !0, !noundef !{}
   %2 = icmp ult i64 %1, 2147483648
   ret i1 %2
 }

diff  --git a/llvm/test/CodeGen/X86/pr48888.ll b/llvm/test/CodeGen/X86/pr48888.ll
index 5fa5b844f1eb0e..5e850e5690e0d0 100644
--- a/llvm/test/CodeGen/X86/pr48888.ll
+++ b/llvm/test/CodeGen/X86/pr48888.ll
@@ -10,7 +10,7 @@ define void @test(ptr %p) nounwind {
 ; CHECK-NEXT:    cmpl $2, %eax
 ; CHECK-NEXT:    retq
 bb:
-  %i = load i64, ptr %p, align 8, !range !0
+  %i = load i64, ptr %p, align 8, !range !0, !noundef !{}
   %i1 = and i64 %i, 6
   %i2 = icmp eq i64 %i1, 2
   br i1 %i2, label %bb3, label %bb5

diff  --git a/llvm/test/CodeGen/X86/pr64589.ll b/llvm/test/CodeGen/X86/pr64589.ll
index 0695e20d592923..130ef517ae28eb 100644
--- a/llvm/test/CodeGen/X86/pr64589.ll
+++ b/llvm/test/CodeGen/X86/pr64589.ll
@@ -1,7 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
 ; RUN: llc -mtriple=x86_64-- < %s | FileCheck %s
 
-; FIXME: This is a miscompile.
 ; It's not possible to directly or the two loads together, because this might
 ; propagate a poison value from the second load (which has !range but not
 ; !noundef).
@@ -10,6 +9,7 @@ define i8 @test(ptr %p) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movzbl (%rdi), %eax
 ; CHECK-NEXT:    orb 1(%rdi), %al
+; CHECK-NEXT:    setne %al
 ; CHECK-NEXT:    addb %al, %al
 ; CHECK-NEXT:    retq
   %v1 = load i8, ptr %p, align 4, !range !0, !noundef !{}


        


More information about the llvm-commits mailing list