[llvm] Preserve range metadata when load is narrowed (PR #128144)
via llvm-commits
llvm-commits at lists.llvm.org
Sat Feb 22 21:28:10 PST 2025
https://github.com/LU-JOHN updated https://github.com/llvm/llvm-project/pull/128144
From 8a5fdc13ad7fafdc8bdc122d4123ea748999bcb8 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Fri, 21 Feb 2025 01:38:05 -0600
Subject: [PATCH 1/7] Preserve range information when load is narrowed
Signed-off-by: John Lu <John.Lu at amd.com>
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 39 ++++++++++++++++---
1 file changed, 33 insertions(+), 6 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index b07f3814d9d2d..4eb2b4c726564 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -14903,12 +14903,39 @@ SDValue DAGCombiner::reduceLoadWidth(SDNode *N) {
AddToWorklist(NewPtr.getNode());
SDValue Load;
- if (ExtType == ISD::NON_EXTLOAD)
- Load = DAG.getLoad(VT, DL, LN0->getChain(), NewPtr,
- LN0->getPointerInfo().getWithOffset(PtrOff),
- LN0->getOriginalAlign(),
- LN0->getMemOperand()->getFlags(), LN0->getAAInfo());
- else
+ if (ExtType == ISD::NON_EXTLOAD) {
+ const MDNode *OldRanges = LN0->getRanges();
+ const MDNode *NewRanges = nullptr;
+ /* If LSBs are loaded and all bounds in the OldRanges metadata fit in
+ the narrower size, preserve the range information by translating
+ to the new narrower type, NewTy */
+ if (ShAmt == 0 && OldRanges) {
+ Type *NewTy = VT.getTypeForEVT(*DAG.getContext());
+ const unsigned NumOperands = OldRanges->getNumOperands();
+ const unsigned NewWidth = NewTy->getIntegerBitWidth();
+ bool InRange = true;
+ SmallVector<Metadata *, 4> Bounds;
+ Bounds.reserve(NumOperands);
+
+ for (unsigned i = 0; i < NumOperands; ++i) {
+ const APInt &BoundValue =
+ mdconst::extract<ConstantInt>(OldRanges->getOperand(i))->getValue();
+ if (BoundValue.getBitWidth() - BoundValue.getNumSignBits() >=
+ NewWidth) {
+ InRange = false;
+ break;
+ }
+ Bounds.push_back(ConstantAsMetadata::get(
+ ConstantInt::get(NewTy, BoundValue.trunc(NewWidth))));
+ }
+ if (InRange)
+ NewRanges = MDNode::get(*DAG.getContext(), Bounds);
+ }
+ Load = DAG.getLoad(
+ VT, DL, LN0->getChain(), NewPtr,
+ LN0->getPointerInfo().getWithOffset(PtrOff), LN0->getOriginalAlign(),
+ LN0->getMemOperand()->getFlags(), LN0->getAAInfo(), NewRanges);
+ } else
Load = DAG.getExtLoad(ExtType, DL, VT, LN0->getChain(), NewPtr,
LN0->getPointerInfo().getWithOffset(PtrOff), ExtVT,
LN0->getOriginalAlign(),
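For reference, a minimal standalone sketch (not part of the patch) of the
fits-in-the-narrower-type test used above, assuming the usual llvm::APInt
semantics: a bound can be truncated to NewWidth bits only when fewer than
NewWidth of its bits are significant, i.e. BitWidth - NumSignBits < NewWidth.

#include "llvm/ADT/APInt.h"
#include <cstdio>

using namespace llvm;

// A bound survives truncation to NewWidth bits iff dropping the high bits
// loses no information; getNumSignBits counts the redundant leading bits.
static bool fitsInNarrowerType(const APInt &Bound, unsigned NewWidth) {
  return Bound.getBitWidth() - Bound.getNumSignBits() < NewWidth;
}

int main() {
  printf("%d\n", fitsInNarrowerType(APInt(64, 64), 32));            // 1
  printf("%d\n", fitsInNarrowerType(APInt(64, 2147483680ULL), 32)); // 0
}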
From c9b533dfdc0d917346fb88b6a33eacf2fc9a167a Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Fri, 21 Feb 2025 01:38:49 -0600
Subject: [PATCH 2/7] Reduce 64-bit shl to 32-bit based on range metadata
Signed-off-by: John Lu <John.Lu at amd.com>
---
llvm/test/CodeGen/AMDGPU/shl64_reduce.ll | 38 +++++++++++++++++++++---
1 file changed, 34 insertions(+), 4 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
index 05430213c17d2..55bfc079cb1c4 100644
--- a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
+++ b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
@@ -21,11 +21,39 @@ define i64 @shl_metadata(i64 %arg0, ptr %arg1.ptr) {
; CHECK-LABEL: shl_metadata:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dword v1, v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, v1, v0
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %shift.amt = load i64, ptr %arg1.ptr, !range !0, !noundef !{}
+ %shl = shl i64 %arg0, %shift.amt
+ ret i64 %shl
+}
+
+define i64 @shl_metadata_two_ranges(i64 %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_metadata_two_ranges:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dword v1, v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, v1, v0
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %shift.amt = load i64, ptr %arg1.ptr, !range !1, !noundef !{}
+ %shl = shl i64 %arg0, %shift.amt
+ ret i64 %shl
+}
+
+define i64 @shl_metadata_out_of_range(i64 %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_metadata_out_of_range:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: flat_load_dword v2, v[2:3]
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
; CHECK-NEXT: s_setpc_b64 s[30:31]
- %shift.amt = load i64, ptr %arg1.ptr, !range !0
+ %shift.amt = load i64, ptr %arg1.ptr, !range !2, !noundef !{}
%shl = shl i64 %arg0, %shift.amt
ret i64 %shl
}
@@ -39,7 +67,7 @@ define <2 x i64> @shl_v2_metadata(<2 x i64> %arg0, ptr %arg1.ptr) {
; CHECK-NEXT: v_lshlrev_b64 v[0:1], v4, v[0:1]
; CHECK-NEXT: v_lshlrev_b64 v[2:3], v6, v[2:3]
; CHECK-NEXT: s_setpc_b64 s[30:31]
- %shift.amt = load <2 x i64>, ptr %arg1.ptr, !range !0
+ %shift.amt = load <2 x i64>, ptr %arg1.ptr, !range !0, !noundef !{}
%shl = shl <2 x i64> %arg0, %shift.amt
ret <2 x i64> %shl
}
@@ -55,7 +83,7 @@ define <3 x i64> @shl_v3_metadata(<3 x i64> %arg0, ptr %arg1.ptr) {
; CHECK-NEXT: v_lshlrev_b64 v[0:1], v8, v[0:1]
; CHECK-NEXT: v_lshlrev_b64 v[2:3], v10, v[2:3]
; CHECK-NEXT: s_setpc_b64 s[30:31]
- %shift.amt = load <3 x i64>, ptr %arg1.ptr, !range !0
+ %shift.amt = load <3 x i64>, ptr %arg1.ptr, !range !0, !noundef !{}
%shl = shl <3 x i64> %arg0, %shift.amt
ret <3 x i64> %shl
}
@@ -74,12 +102,14 @@ define <4 x i64> @shl_v4_metadata(<4 x i64> %arg0, ptr %arg1.ptr) {
; CHECK-NEXT: v_lshlrev_b64 v[4:5], v13, v[4:5]
; CHECK-NEXT: v_lshlrev_b64 v[6:7], v15, v[6:7]
; CHECK-NEXT: s_setpc_b64 s[30:31]
- %shift.amt = load <4 x i64>, ptr %arg1.ptr, !range !0
+ %shift.amt = load <4 x i64>, ptr %arg1.ptr, !range !0, !noundef !{}
%shl = shl <4 x i64> %arg0, %shift.amt
ret <4 x i64> %shl
}
!0 = !{i64 32, i64 64}
+!1 = !{i64 32, i64 38, i64 42, i64 48}
+!2 = !{i64 31, i64 38, i64 42, i64 48}
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Test range with an "or X, 16"
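For readers unfamiliar with the metadata format: each pair of !range operands
encodes a half-open [Lo, Hi) interval, so !1 above is the union
[32, 38) u [42, 48). A hedged sketch of how such a node could be built with
the standard Metadata APIs (illustrative only, not code from this patch):

#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Build the two-subrange node !{i64 32, i64 38, i64 42, i64 48}.
static MDNode *makeTwoRanges(LLVMContext &Ctx) {
  Type *I64 = Type::getInt64Ty(Ctx);
  Metadata *Ops[] = {ConstantAsMetadata::get(ConstantInt::get(I64, 32)),
                     ConstantAsMetadata::get(ConstantInt::get(I64, 38)),
                     ConstantAsMetadata::get(ConstantInt::get(I64, 42)),
                     ConstantAsMetadata::get(ConstantInt::get(I64, 48))};
  return MDNode::get(Ctx, Ops);
}

int main() {
  LLVMContext Ctx;
  makeTwoRanges(Ctx)->print(errs());
  errs() << "\n";
}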
From 3a9f8de8a49f4edec2521df521c02f505d6689ed Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Fri, 21 Feb 2025 10:18:07 -0600
Subject: [PATCH 3/7] Add negative test when bounds can't be truncated to i32
Signed-off-by: John Lu <John.Lu at amd.com>
---
llvm/test/CodeGen/AMDGPU/shl64_reduce.ll | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)
diff --git a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
index 55bfc079cb1c4..955cc079704d2 100644
--- a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
+++ b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
@@ -58,6 +58,23 @@ define i64 @shl_metadata_out_of_range(i64 %arg0, ptr %arg1.ptr) {
ret i64 %shl
}
+; Bounds cannot be truncated to i32 when load is narrowed to i32.
+; Reduction not done.
+; Bounds were chosen so that if bounds were truncated to i32 the
+; known minimum would be 32 and the shl would be erroneously reduced.
+define i64 @shl_metadata_cant_be_narrowed_to_i32(i64 %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_metadata_cant_be_narrowed_to_i32:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dword v2, v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %shift.amt = load i64, ptr %arg1.ptr, !range !3, !noundef !{}
+ %shl = shl i64 %arg0, %shift.amt
+ ret i64 %shl
+}
+
define <2 x i64> @shl_v2_metadata(<2 x i64> %arg0, ptr %arg1.ptr) {
; CHECK-LABEL: shl_v2_metadata:
; CHECK: ; %bb.0:
@@ -110,6 +127,7 @@ define <4 x i64> @shl_v4_metadata(<4 x i64> %arg0, ptr %arg1.ptr) {
!0 = !{i64 32, i64 64}
!1 = !{i64 32, i64 38, i64 42, i64 48}
!2 = !{i64 31, i64 38, i64 42, i64 48}
+!3 = !{i64 32, i64 38, i64 2147483680, i64 2147483681}
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Test range with an "or X, 16"
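The choice of bounds in !3 can be checked with a small sketch (again not part
of the patch): i64 2147483680 keeps its unsigned value under truncation to
32 bits, but its sign flips, so naively truncated bounds would describe a
different range.

#include "llvm/ADT/APInt.h"
#include <cstdio>

using namespace llvm;

int main() {
  // Third bound of !3. As an unsigned value it fits in 32 bits, but
  // truncation flips the sign bit, so a blindly truncated bound would
  // describe a negative range.
  APInt Bound(64, 2147483680ULL);
  printf("%lld\n", (long long)Bound.trunc(32).getSExtValue()); // -2147483616
}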
From d9b8042dfa2e56bd77a0246bc7671aefaf74b0af Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Fri, 21 Feb 2025 10:21:38 -0600
Subject: [PATCH 4/7] Update testcase comments
Signed-off-by: John Lu <John.Lu at amd.com>
---
llvm/test/CodeGen/AMDGPU/shl64_reduce.ll | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
index 955cc079704d2..69242f4e44840 100644
--- a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
+++ b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
@@ -13,10 +13,6 @@
; Test range with metadata
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; FIXME: This case should be reduced, but SelectionDAG::computeKnownBits() cannot
-; determine the minimum from metadata in this case. Match current results
-; for now.
-
define i64 @shl_metadata(i64 %arg0, ptr %arg1.ptr) {
; CHECK-LABEL: shl_metadata:
; CHECK: ; %bb.0:
@@ -45,6 +41,7 @@ define i64 @shl_metadata_two_ranges(i64 %arg0, ptr %arg1.ptr) {
ret i64 %shl
}
+; Known minimum is too low. Reduction must not be done.
define i64 @shl_metadata_out_of_range(i64 %arg0, ptr %arg1.ptr) {
; CHECK-LABEL: shl_metadata_out_of_range:
; CHECK: ; %bb.0:
@@ -59,7 +56,7 @@ define i64 @shl_metadata_out_of_range(i64 %arg0, ptr %arg1.ptr) {
}
; Bounds cannot be truncated to i32 when load is narrowed to i32.
-; Reduction not done.
+; Reduction must not be done.
; Bounds were chosen so that if bounds were truncated to i32 the
; known minimum would be 32 and the shl would be erroneously reduced.
define i64 @shl_metadata_cant_be_narrowed_to_i32(i64 %arg0, ptr %arg1.ptr) {
@@ -75,6 +72,7 @@ define i64 @shl_metadata_cant_be_narrowed_to_i32(i64 %arg0, ptr %arg1.ptr) {
ret i64 %shl
}
+; FIXME: This case should be reduced
define <2 x i64> @shl_v2_metadata(<2 x i64> %arg0, ptr %arg1.ptr) {
; CHECK-LABEL: shl_v2_metadata:
; CHECK: ; %bb.0:
@@ -89,6 +87,7 @@ define <2 x i64> @shl_v2_metadata(<2 x i64> %arg0, ptr %arg1.ptr) {
ret <2 x i64> %shl
}
+; FIXME: This case should be reduced
define <3 x i64> @shl_v3_metadata(<3 x i64> %arg0, ptr %arg1.ptr) {
; CHECK-LABEL: shl_v3_metadata:
; CHECK: ; %bb.0:
@@ -105,6 +104,7 @@ define <3 x i64> @shl_v3_metadata(<3 x i64> %arg0, ptr %arg1.ptr) {
ret <3 x i64> %shl
}
+; FIXME: This case should be reduced
define <4 x i64> @shl_v4_metadata(<4 x i64> %arg0, ptr %arg1.ptr) {
; CHECK-LABEL: shl_v4_metadata:
; CHECK: ; %bb.0:
From 32bd7e56fec6ad8d4d94a1f76853b29bae923c90 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Fri, 21 Feb 2025 17:32:00 -0600
Subject: [PATCH 5/7] Simplify bitwidth checking logic with ConstantRange
utility function
Signed-off-by: John Lu <John.Lu at amd.com>
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 20 +++++++++++++------
1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 4eb2b4c726564..3646853aecec9 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -14912,21 +14912,29 @@ SDValue DAGCombiner::reduceLoadWidth(SDNode *N) {
if (ShAmt == 0 && OldRanges) {
Type *NewTy = VT.getTypeForEVT(*DAG.getContext());
const unsigned NumOperands = OldRanges->getNumOperands();
+ unsigned NumRanges = NumOperands / 2;
const unsigned NewWidth = NewTy->getIntegerBitWidth();
bool InRange = true;
SmallVector<Metadata *, 4> Bounds;
Bounds.reserve(NumOperands);
- for (unsigned i = 0; i < NumOperands; ++i) {
- const APInt &BoundValue =
- mdconst::extract<ConstantInt>(OldRanges->getOperand(i))->getValue();
- if (BoundValue.getBitWidth() - BoundValue.getNumSignBits() >=
- NewWidth) {
+ for (unsigned i = 0; i < NumRanges; ++i) {
+ const APInt &LowValue =
+ mdconst::extract<ConstantInt>(OldRanges->getOperand(2 * i))
+ ->getValue();
+ const APInt &HighValue =
+ mdconst::extract<ConstantInt>(OldRanges->getOperand(2 * i + 1))
+ ->getValue();
+ ConstantRange CurRange(LowValue, HighValue);
+
+ if (CurRange.getMinSignedBits() > NewWidth) {
InRange = false;
break;
}
Bounds.push_back(ConstantAsMetadata::get(
- ConstantInt::get(NewTy, BoundValue.trunc(NewWidth))));
+ ConstantInt::get(NewTy, LowValue.trunc(NewWidth))));
+ Bounds.push_back(ConstantAsMetadata::get(
+ ConstantInt::get(NewTy, HighValue.trunc(NewWidth))));
}
if (InRange)
NewRanges = MDNode::get(*DAG.getContext(), Bounds);
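A small standalone illustration of the new check, using
ConstantRange::getMinSignedBits as this revision does (a sketch under that
assumption, not patch code):

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include <cstdio>

using namespace llvm;

int main() {
  // !{i64 32, i64 64}: every element of [32, 64) fits in 7 signed bits,
  // so truncation to i32 is safe.
  ConstantRange R1(APInt(64, 32), APInt(64, 64));
  printf("%u\n", R1.getMinSignedBits()); // 7

  // The covering range of !3 needs 33 signed bits, so it is rejected.
  ConstantRange R2(APInt(64, 32), APInt(64, 2147483681ULL));
  printf("%u\n", R2.getMinSignedBits()); // 33
}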
From c933174da627fbb2654462b22652561ae2f76d07 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Sat, 22 Feb 2025 11:31:15 -0600
Subject: [PATCH 6/7] Make one new range to cover all sub-ranges
Signed-off-by: John Lu <John.Lu at amd.com>
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 41 ++++++++-----------
1 file changed, 17 insertions(+), 24 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 3646853aecec9..ef8f4e4529311 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -14912,32 +14912,25 @@ SDValue DAGCombiner::reduceLoadWidth(SDNode *N) {
if (ShAmt == 0 && OldRanges) {
Type *NewTy = VT.getTypeForEVT(*DAG.getContext());
const unsigned NumOperands = OldRanges->getNumOperands();
- unsigned NumRanges = NumOperands / 2;
const unsigned NewWidth = NewTy->getIntegerBitWidth();
- bool InRange = true;
- SmallVector<Metadata *, 4> Bounds;
- Bounds.reserve(NumOperands);
-
- for (unsigned i = 0; i < NumRanges; ++i) {
- const APInt &LowValue =
- mdconst::extract<ConstantInt>(OldRanges->getOperand(2 * i))
- ->getValue();
- const APInt &HighValue =
- mdconst::extract<ConstantInt>(OldRanges->getOperand(2 * i + 1))
- ->getValue();
- ConstantRange CurRange(LowValue, HighValue);
-
- if (CurRange.getMinSignedBits() > NewWidth) {
- InRange = false;
- break;
- }
- Bounds.push_back(ConstantAsMetadata::get(
- ConstantInt::get(NewTy, LowValue.trunc(NewWidth))));
- Bounds.push_back(ConstantAsMetadata::get(
- ConstantInt::get(NewTy, HighValue.trunc(NewWidth))));
- }
- if (InRange)
+ Metadata *Bounds[2];
+ /* Don't preserve all sub-ranges. Make one range that contains all
+ OldRanges. Use lower bound of first range and upper bound of last
+ range */
+ const APInt &LowValue =
+ mdconst::extract<ConstantInt>(OldRanges->getOperand(0))->getValue();
+ const APInt &HighValue =
+ mdconst::extract<ConstantInt>(OldRanges->getOperand(NumOperands - 1))
+ ->getValue();
+ ConstantRange CurRange(LowValue, HighValue);
+
+ if (CurRange.getMinSignedBits() <= NewWidth) {
+ Bounds[0] = ConstantAsMetadata::get(
+ ConstantInt::get(NewTy, LowValue.trunc(NewWidth)));
+ Bounds[1] = ConstantAsMetadata::get(
+ ConstantInt::get(NewTy, HighValue.trunc(NewWidth)));
NewRanges = MDNode::get(*DAG.getContext(), Bounds);
+ }
}
Load = DAG.getLoad(
VT, DL, LN0->getChain(), NewPtr,
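A sketch of the covering-range idea (illustrative only): collapsing
!1 = !{i64 32, i64 38, i64 42, i64 48} to its first lower and last upper
bound yields [32, 48), which absorbs the gap but remains a true, if weaker,
fact about the loaded value.

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include <cassert>

using namespace llvm;

int main() {
  // First lower bound and last upper bound of !1 give [32, 48).
  ConstantRange Cover(APInt(64, 32), APInt(64, 48));
  assert(Cover.contains(APInt(64, 37)) && Cover.contains(APInt(64, 42)));
  assert(!Cover.contains(APInt(64, 48))); // upper bound is exclusive
  // The gap [38, 42) is absorbed: any value in it is now also "allowed",
  // which is conservative and therefore still sound metadata.
  assert(Cover.contains(APInt(64, 40)));
  return 0;
}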
From 179869cea5f8ec61704fa30e4b59a1a585fa34b5 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Sat, 22 Feb 2025 23:26:07 -0600
Subject: [PATCH 7/7] Cleanly implement using ConstantRange utility functions
Signed-off-by: John Lu <John.Lu at amd.com>
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 31 +++++++------------
1 file changed, 11 insertions(+), 20 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index ef8f4e4529311..60ca3bb5012b9 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -14906,29 +14906,20 @@ SDValue DAGCombiner::reduceLoadWidth(SDNode *N) {
if (ExtType == ISD::NON_EXTLOAD) {
const MDNode *OldRanges = LN0->getRanges();
const MDNode *NewRanges = nullptr;
- /* If LSBs are loaded and all bounds in the OldRanges metadata fit in
- the narrower size, preserve the range information by translating
- to the new narrower type, NewTy */
+ /* If LSBs are loaded and the truncated ConstantRange for the OldRanges
+ metadata is not the full set for the NewWidth, then create NewRanges
+ metadata for the truncated load */
if (ShAmt == 0 && OldRanges) {
Type *NewTy = VT.getTypeForEVT(*DAG.getContext());
- const unsigned NumOperands = OldRanges->getNumOperands();
const unsigned NewWidth = NewTy->getIntegerBitWidth();
- Metadata *Bounds[2];
- /* Don't preserve all sub-ranges. Make one range that contains all
- OldRanges. Use lower bound of first range and upper bound of last
- range */
- const APInt &LowValue =
- mdconst::extract<ConstantInt>(OldRanges->getOperand(0))->getValue();
- const APInt &HighValue =
- mdconst::extract<ConstantInt>(OldRanges->getOperand(NumOperands - 1))
- ->getValue();
- ConstantRange CurRange(LowValue, HighValue);
-
- if (CurRange.getMinSignedBits() <= NewWidth) {
- Bounds[0] = ConstantAsMetadata::get(
- ConstantInt::get(NewTy, LowValue.trunc(NewWidth)));
- Bounds[1] = ConstantAsMetadata::get(
- ConstantInt::get(NewTy, HighValue.trunc(NewWidth)));
+ const ConstantRange CR = getConstantRangeFromMetadata(*OldRanges);
+ const ConstantRange TruncatedCR = CR.truncate(NewWidth);
+
+ if (!TruncatedCR.isFullSet()) {
+ Metadata *Bounds[2] = {ConstantAsMetadata::get(ConstantInt::get(
+ NewTy, TruncatedCR.getLower())),
+ ConstantAsMetadata::get(ConstantInt::get(
+ NewTy, TruncatedCR.getUpper()))};
NewRanges = MDNode::get(*DAG.getContext(), Bounds);
}
}
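For completeness, a standalone sketch of how the final guard behaves,
assuming the documented semantics of ConstantRange::truncate (the smallest
range containing every truncated element) and isFullSet:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include <cstdio>

using namespace llvm;

int main() {
  // [32, 64) over i64 truncates losslessly to [32, 64) over i32: keep it.
  ConstantRange Narrow =
      ConstantRange(APInt(64, 32), APInt(64, 64)).truncate(32);
  printf("%d\n", Narrow.isFullSet()); // 0: NewRanges is attached

  // [0, 2^33) hits every residue mod 2^32, so the truncated range is the
  // full 32-bit set and no NewRanges metadata is attached.
  ConstantRange Wide =
      ConstantRange(APInt(64, 0), APInt(64, 1ULL << 33)).truncate(32);
  printf("%d\n", Wide.isFullSet()); // 1: metadata is dropped
}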