[llvm] [GlobalISel][NFC] Rename GISelKnownBits to GISelValueTracking (PR #133466)
Tim Gymnich via llvm-commits
llvm-commits at lists.llvm.org
Fri Mar 28 10:08:41 PDT 2025
https://github.com/tgymnich updated https://github.com/llvm/llvm-project/pull/133466
>From 591475ff8c5f79e4e2e68ec4f995623a3ed88b2f Mon Sep 17 00:00:00 2001
From: Tim Gymnich <tim at gymni.ch>
Date: Fri, 28 Mar 2025 16:43:24 +0000
Subject: [PATCH] [GlobalISel][NFC] Rename GISelKnownBits to GISelValueTracking
---
llvm/docs/GlobalISel/KnownBits.rst | 12 +-
.../llvm/CodeGen/GlobalISel/Combiner.h | 4 +-
.../llvm/CodeGen/GlobalISel/CombinerHelper.h | 11 +-
.../CodeGen/GlobalISel/GIMatchTableExecutor.h | 8 +-
...{GISelKnownBits.h => GISelValueTracking.h} | 42 +++--
.../CodeGen/GlobalISel/InstructionSelect.h | 4 +-
.../GlobalISel/LegalizationArtifactCombiner.h | 10 +-
.../llvm/CodeGen/GlobalISel/Legalizer.h | 4 +-
.../llvm/CodeGen/GlobalISel/LegalizerHelper.h | 8 +-
llvm/include/llvm/CodeGen/GlobalISel/Utils.h | 4 +-
llvm/include/llvm/CodeGen/TargetLowering.h | 14 +-
llvm/include/llvm/InitializePasses.h | 2 +-
.../include/llvm/Target/GlobalISel/Combine.td | 2 +-
llvm/lib/CodeGen/GlobalISel/CMakeLists.txt | 2 +-
llvm/lib/CodeGen/GlobalISel/Combiner.cpp | 6 +-
.../lib/CodeGen/GlobalISel/CombinerHelper.cpp | 59 +++----
...elKnownBits.cpp => GISelValueTracking.cpp} | 106 +++++++------
.../CodeGen/GlobalISel/InstructionSelect.cpp | 12 +-
llvm/lib/CodeGen/GlobalISel/Legalizer.cpp | 18 +--
.../CodeGen/GlobalISel/LegalizerHelper.cpp | 8 +-
llvm/lib/CodeGen/GlobalISel/Utils.cpp | 10 +-
.../CodeGen/SelectionDAG/TargetLowering.cpp | 10 +-
llvm/lib/Target/AArch64/AArch64Combine.td | 2 +-
.../GISel/AArch64InstructionSelector.cpp | 4 +-
.../GISel/AArch64O0PreLegalizerCombiner.cpp | 20 +--
.../GISel/AArch64PostLegalizerCombiner.cpp | 20 +--
.../GISel/AArch64PostLegalizerLowering.cpp | 2 +-
.../GISel/AArch64PreLegalizerCombiner.cpp | 26 ++--
.../Target/AMDGPU/AMDGPUCombinerHelper.cpp | 4 +-
llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.h | 2 +-
.../Target/AMDGPU/AMDGPUGlobalISelUtils.cpp | 10 +-
.../lib/Target/AMDGPU/AMDGPUGlobalISelUtils.h | 4 +-
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 7 +-
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h | 2 +-
.../AMDGPU/AMDGPUInstructionSelector.cpp | 41 ++---
.../Target/AMDGPU/AMDGPUInstructionSelector.h | 2 +-
.../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 8 +-
.../AMDGPU/AMDGPUPostLegalizerCombiner.cpp | 30 ++--
.../AMDGPU/AMDGPUPreLegalizerCombiner.cpp | 20 +--
.../Target/AMDGPU/AMDGPURegBankCombiner.cpp | 20 +--
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 32 ++--
llvm/lib/Target/AMDGPU/SIISelLowering.h | 5 +-
.../Target/Mips/MipsPostLegalizerCombiner.cpp | 20 +--
.../Target/Mips/MipsPreLegalizerCombiner.cpp | 21 +--
.../RISCV/GISel/RISCVInstructionSelector.cpp | 12 +-
.../GISel/RISCVO0PreLegalizerCombiner.cpp | 20 +--
.../GISel/RISCVPostLegalizerCombiner.cpp | 22 +--
.../RISCV/GISel/RISCVPreLegalizerCombiner.cpp | 20 +--
.../Target/SPIRV/SPIRVInstructionSelector.cpp | 7 +-
llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp | 4 +-
.../SPIRV/SPIRVPreLegalizerCombiner.cpp | 20 +--
.../CodeGen/GlobalISel/KnownBitsTest.cpp | 144 +++++++++---------
.../GlobalISel/KnownBitsVectorTest.cpp | 108 ++++++-------
.../CodeGen/GlobalISel/LegalizerTest.cpp | 14 +-
.../llvm/lib/CodeGen/GlobalISel/BUILD.gn | 2 +-
55 files changed, 517 insertions(+), 514 deletions(-)
rename llvm/include/llvm/CodeGen/GlobalISel/{GISelKnownBits.h => GISelValueTracking.h} (74%)
rename llvm/lib/CodeGen/GlobalISel/{GISelKnownBits.cpp => GISelValueTracking.cpp} (90%)
diff --git a/llvm/docs/GlobalISel/KnownBits.rst b/llvm/docs/GlobalISel/KnownBits.rst
index 7e628722d5323..c01faa5f08f0f 100644
--- a/llvm/docs/GlobalISel/KnownBits.rst
+++ b/llvm/docs/GlobalISel/KnownBits.rst
@@ -61,12 +61,12 @@ dependency with ``INITIALIZE_PASS_DEPENDENCY``.
.. code-block:: c++
- #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+ #include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
...
INITIALIZE_PASS_BEGIN(...)
- INITIALIZE_PASS_DEPENDENCY(GISelKnownBitsAnalysis)
+ INITIALIZE_PASS_DEPENDENCY(GISelValueTrackingAnalysis)
INITIALIZE_PASS_END(...)
and require the pass in ``getAnalysisUsage``.
@@ -74,10 +74,10 @@ and require the pass in ``getAnalysisUsage``.
.. code-block:: c++
void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<GISelKnownBitsAnalysis>();
+ AU.addRequired<GISelValueTrackingAnalysis>();
// Optional: If your pass preserves known bits analysis (many do) then
// indicate that it's preserved for re-use by another pass here.
- AU.addPreserved<GISelKnownBitsAnalysis>();
+ AU.addPreserved<GISelValueTrackingAnalysis>();
}
Then it's just a matter of fetching the analysis and using it:
@@ -86,10 +86,10 @@ Then it's just a matter of fetching the analysis and using it:
bool MyPass::runOnMachineFunction(MachineFunction &MF) {
...
- GISelKnownBits &KB = getAnalysis<GISelKnownBitsAnalysis>().get(MF);
+ GISelValueTracking &VT = getAnalysis<GISelValueTrackingAnalysis>().get(MF);
...
MachineInstr *MI = ...;
- KnownBits Known = KB->getKnownBits(MI->getOperand(0).getReg());
+ KnownBits Known = VT->getKnownBits(MI->getOperand(0).getReg());
if (Known.Zeros & 1) {
// Bit 0 is known to be zero
}
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/Combiner.h b/llvm/include/llvm/CodeGen/GlobalISel/Combiner.h
index fa6a7be6cf6c3..39ff90c2687f4 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/Combiner.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/Combiner.h
@@ -58,7 +58,7 @@ class Combiner : public GIMatchTableExecutor {
/// If CSEInfo is not null, then the Combiner will use CSEInfo as the observer
/// and also create a CSEMIRBuilder. Pass nullptr if CSE is not needed.
Combiner(MachineFunction &MF, CombinerInfo &CInfo,
- const TargetPassConfig *TPC, GISelKnownBits *KB,
+ const TargetPassConfig *TPC, GISelValueTracking *VT,
GISelCSEInfo *CSEInfo = nullptr);
virtual ~Combiner();
@@ -72,7 +72,7 @@ class Combiner : public GIMatchTableExecutor {
MachineIRBuilder &B;
MachineFunction &MF;
MachineRegisterInfo &MRI;
- GISelKnownBits *KB;
+ GISelValueTracking *VT;
const TargetPassConfig *TPC;
GISelCSEInfo *CSEInfo;
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 9b78342c8fc39..93b424d27fdf1 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -38,7 +38,7 @@ class MachineInstrBuilder;
class MachineRegisterInfo;
class MachineInstr;
class MachineOperand;
-class GISelKnownBits;
+class GISelValueTracking;
class MachineDominatorTree;
class LegalizerInfo;
struct LegalityQuery;
@@ -106,7 +106,7 @@ class CombinerHelper {
MachineIRBuilder &Builder;
MachineRegisterInfo &MRI;
GISelChangeObserver &Observer;
- GISelKnownBits *KB;
+ GISelValueTracking *VT;
MachineDominatorTree *MDT;
bool IsPreLegalize;
const LegalizerInfo *LI;
@@ -115,14 +115,11 @@ class CombinerHelper {
public:
CombinerHelper(GISelChangeObserver &Observer, MachineIRBuilder &B,
- bool IsPreLegalize,
- GISelKnownBits *KB = nullptr,
+ bool IsPreLegalize, GISelValueTracking *VT = nullptr,
MachineDominatorTree *MDT = nullptr,
const LegalizerInfo *LI = nullptr);
- GISelKnownBits *getKnownBits() const {
- return KB;
- }
+ GISelValueTracking *getValueTracking() const { return VT; }
MachineIRBuilder &getBuilder() const {
return Builder;
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h
index 073e9a77801cb..6a7c0edbf2ce0 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h
@@ -41,7 +41,7 @@ class MachineBasicBlock;
class ProfileSummaryInfo;
class APInt;
class APFloat;
-class GISelKnownBits;
+class GISelValueTracking;
class MachineInstr;
class MachineIRBuilder;
class MachineInstrBuilder;
@@ -588,7 +588,7 @@ class GIMatchTableExecutor {
virtual ~GIMatchTableExecutor() = default;
CodeGenCoverage *CoverageInfo = nullptr;
- GISelKnownBits *KB = nullptr;
+ GISelValueTracking *VT = nullptr;
MachineFunction *MF = nullptr;
ProfileSummaryInfo *PSI = nullptr;
BlockFrequencyInfo *BFI = nullptr;
@@ -598,12 +598,12 @@ class GIMatchTableExecutor {
virtual void setupGeneratedPerFunctionState(MachineFunction &MF) = 0;
/// Setup per-MF executor state.
- virtual void setupMF(MachineFunction &mf, GISelKnownBits *kb,
+ virtual void setupMF(MachineFunction &mf, GISelValueTracking *vt,
CodeGenCoverage *covinfo = nullptr,
ProfileSummaryInfo *psi = nullptr,
BlockFrequencyInfo *bfi = nullptr) {
CoverageInfo = covinfo;
- KB = kb;
+ VT = vt;
MF = &mf;
PSI = psi;
BFI = bfi;
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GISelKnownBits.h b/llvm/include/llvm/CodeGen/GlobalISel/GISelValueTracking.h
similarity index 74%
rename from llvm/include/llvm/CodeGen/GlobalISel/GISelKnownBits.h
rename to llvm/include/llvm/CodeGen/GlobalISel/GISelValueTracking.h
index ea75c2c7f6f41..aa99bf321d2b1 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/GISelKnownBits.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/GISelValueTracking.h
@@ -1,4 +1,4 @@
-//===- llvm/CodeGen/GlobalISel/GISelKnownBits.h ---------------*- C++ -*-===//
+//===- llvm/CodeGen/GlobalISel/GISelValueTracking.h -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -11,8 +11,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CODEGEN_GLOBALISEL_GISELKNOWNBITS_H
-#define LLVM_CODEGEN_GLOBALISEL_GISELKNOWNBITS_H
+#ifndef LLVM_CODEGEN_GLOBALISEL_GISELVALUETRACKING_H
+#define LLVM_CODEGEN_GLOBALISEL_GISELVALUETRACKING_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
@@ -26,7 +26,7 @@ namespace llvm {
class TargetLowering;
class DataLayout;
-class GISelKnownBits : public GISelChangeObserver {
+class GISelValueTracking : public GISelChangeObserver {
MachineFunction &MF;
MachineRegisterInfo &MRI;
const TargetLowering &TL;
@@ -36,23 +36,18 @@ class GISelKnownBits : public GISelChangeObserver {
SmallDenseMap<Register, KnownBits, 16> ComputeKnownBitsCache;
void computeKnownBitsMin(Register Src0, Register Src1, KnownBits &Known,
- const APInt &DemandedElts,
- unsigned Depth = 0);
+ const APInt &DemandedElts, unsigned Depth = 0);
unsigned computeNumSignBitsMin(Register Src0, Register Src1,
const APInt &DemandedElts, unsigned Depth = 0);
public:
- GISelKnownBits(MachineFunction &MF, unsigned MaxDepth = 6);
- virtual ~GISelKnownBits() = default;
+ GISelValueTracking(MachineFunction &MF, unsigned MaxDepth = 6);
+ virtual ~GISelValueTracking() = default;
- const MachineFunction &getMachineFunction() const {
- return MF;
- }
+ const MachineFunction &getMachineFunction() const { return MF; }
- const DataLayout &getDataLayout() const {
- return DL;
- }
+ const DataLayout &getDataLayout() const { return DL; }
virtual void computeKnownBitsImpl(Register R, KnownBits &Known,
const APInt &DemandedElts,
@@ -83,8 +78,7 @@ class GISelKnownBits : public GISelChangeObserver {
/// predicate to simplify operations downstream.
bool signBitIsZero(Register Op);
- static void computeKnownBitsForAlignment(KnownBits &Known,
- Align Alignment) {
+ static void computeKnownBitsForAlignment(KnownBits &Known, Align Alignment) {
// The low bits are known zero if the pointer is aligned.
Known.Zero.setLowBits(Log2(Alignment));
}
@@ -103,26 +97,26 @@ class GISelKnownBits : public GISelChangeObserver {
};
/// To use KnownBitsInfo analysis in a pass,
-/// KnownBitsInfo &Info = getAnalysis<GISelKnownBitsInfoAnalysis>().get(MF);
+/// KnownBitsInfo &Info = getAnalysis<GISelValueTrackingInfoAnalysis>().get(MF);
/// Add to observer if the Info is caching.
/// WrapperObserver.addObserver(Info);
/// Eventually add other features such as caching/ser/deserializing
-/// to MIR etc. Those implementations can derive from GISelKnownBits
+/// to MIR etc. Those implementations can derive from GISelValueTracking
/// and override computeKnownBitsImpl.
-class GISelKnownBitsAnalysis : public MachineFunctionPass {
- std::unique_ptr<GISelKnownBits> Info;
+class GISelValueTrackingAnalysis : public MachineFunctionPass {
+ std::unique_ptr<GISelValueTracking> Info;
public:
static char ID;
- GISelKnownBitsAnalysis() : MachineFunctionPass(ID) {
- initializeGISelKnownBitsAnalysisPass(*PassRegistry::getPassRegistry());
+ GISelValueTrackingAnalysis() : MachineFunctionPass(ID) {
+ initializeGISelValueTrackingAnalysisPass(*PassRegistry::getPassRegistry());
}
- GISelKnownBits &get(MachineFunction &MF);
+ GISelValueTracking &get(MachineFunction &MF);
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnMachineFunction(MachineFunction &MF) override;
void releaseMemory() override { Info.reset(); }
};
} // namespace llvm
-#endif // LLVM_CODEGEN_GLOBALISEL_GISELKNOWNBITS_H
+#endif // LLVM_CODEGEN_GLOBALISEL_GISELVALUETRACKING_H
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelect.h b/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelect.h
index a2f06e21a700e..75f683764d165 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelect.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelect.h
@@ -21,7 +21,7 @@
namespace llvm {
class InstructionSelector;
-class GISelKnownBits;
+class GISelValueTracking;
class BlockFrequencyInfo;
class ProfileSummaryInfo;
@@ -62,7 +62,7 @@ class InstructionSelect : public MachineFunctionPass {
class MIIteratorMaintainer;
InstructionSelector *ISel = nullptr;
- GISelKnownBits *KB = nullptr;
+ GISelValueTracking *VT = nullptr;
BlockFrequencyInfo *BFI = nullptr;
ProfileSummaryInfo *PSI = nullptr;
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
index 0d2ff098a15e3..3712a7fa06d9a 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
@@ -36,7 +36,7 @@ class LegalizationArtifactCombiner {
MachineIRBuilder &Builder;
MachineRegisterInfo &MRI;
const LegalizerInfo &LI;
- GISelKnownBits *KB;
+ GISelValueTracking *VT;
static bool isArtifactCast(unsigned Opc) {
switch (Opc) {
@@ -53,8 +53,8 @@ class LegalizationArtifactCombiner {
public:
LegalizationArtifactCombiner(MachineIRBuilder &B, MachineRegisterInfo &MRI,
const LegalizerInfo &LI,
- GISelKnownBits *KB = nullptr)
- : Builder(B), MRI(MRI), LI(LI), KB(KB) {}
+ GISelValueTracking *VT = nullptr)
+ : Builder(B), MRI(MRI), LI(LI), VT(VT) {}
bool tryCombineAnyExt(MachineInstr &MI,
SmallVectorImpl<MachineInstr *> &DeadInsts,
@@ -151,7 +151,7 @@ class LegalizationArtifactCombiner {
// OptLevel results in significant compile-time and O0 code-size
// improvements. Inserting unnecessary instructions between boolean defs
// and uses hinders a lot of folding during ISel.
- if (KB && (KB->getKnownZeroes(AndSrc) | ExtMaskVal).isAllOnes()) {
+ if (VT && (VT->getKnownZeroes(AndSrc) | ExtMaskVal).isAllOnes()) {
replaceRegOrBuildCopy(DstReg, AndSrc, MRI, Builder, UpdatedDefs,
Observer);
} else {
@@ -214,7 +214,7 @@ class LegalizationArtifactCombiner {
TruncSrc = Builder.buildAnyExtOrTrunc(DstTy, TruncSrc).getReg(0);
// Elide G_SEXT_INREG if possible. This is similar to eliding G_AND in
// tryCombineZExt. Refer to the comment in tryCombineZExt for rationale.
- if (KB && KB->computeNumSignBits(TruncSrc) >
+ if (VT && VT->computeNumSignBits(TruncSrc) >
DstTy.getScalarSizeInBits() - SizeInBits)
replaceRegOrBuildCopy(DstReg, TruncSrc, MRI, Builder, UpdatedDefs,
Observer);
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/Legalizer.h b/llvm/include/llvm/CodeGen/GlobalISel/Legalizer.h
index e232ab0fb3fe5..15bef84632b7a 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/Legalizer.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/Legalizer.h
@@ -22,7 +22,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -77,7 +77,7 @@ class Legalizer : public MachineFunctionPass {
legalizeMachineFunction(MachineFunction &MF, const LegalizerInfo &LI,
ArrayRef<GISelChangeObserver *> AuxObservers,
LostDebugLocObserver &LocObserver,
- MachineIRBuilder &MIRBuilder, GISelKnownBits *KB);
+ MachineIRBuilder &MIRBuilder, GISelValueTracking *VT);
};
} // End namespace llvm.
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index 4e18f5cc913a7..428f08e5a7b28 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -21,7 +21,7 @@
#define LLVM_CODEGEN_GLOBALISEL_LEGALIZERHELPER_H
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/TargetOpcodes.h"
@@ -58,7 +58,7 @@ class LegalizerHelper {
MachineRegisterInfo &MRI;
const LegalizerInfo &LI;
const TargetLowering &TLI;
- GISelKnownBits *KB;
+ GISelValueTracking *VT;
public:
enum LegalizeResult {
@@ -77,13 +77,13 @@ class LegalizerHelper {
/// Expose LegalizerInfo so the clients can re-use.
const LegalizerInfo &getLegalizerInfo() const { return LI; }
const TargetLowering &getTargetLowering() const { return TLI; }
- GISelKnownBits *getKnownBits() const { return KB; }
+ GISelValueTracking *getValueTracking() const { return VT; }
LegalizerHelper(MachineFunction &MF, GISelChangeObserver &Observer,
MachineIRBuilder &B);
LegalizerHelper(MachineFunction &MF, const LegalizerInfo &LI,
GISelChangeObserver &Observer, MachineIRBuilder &B,
- GISelKnownBits *KB = nullptr);
+ GISelValueTracking *VT = nullptr);
/// Replace \p MI by a sequence of legal instructions that can implement the
/// same operation. Note that this means \p MI may be deleted, so any iterator
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/Utils.h b/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
index a35ecae5d18bf..44141844f42f4 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -31,7 +31,7 @@ class AnalysisUsage;
class LostDebugLocObserver;
class MachineBasicBlock;
class BlockFrequencyInfo;
-class GISelKnownBits;
+class GISelValueTracking;
class MachineFunction;
class MachineInstr;
class MachineIRBuilder;
@@ -331,7 +331,7 @@ ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2,
/// from computeKnownBits in that it doesn't necessarily determine which bit is
/// set.
bool isKnownToBeAPowerOfTwo(Register Val, const MachineRegisterInfo &MRI,
- GISelKnownBits *KnownBits = nullptr);
+ GISelValueTracking *ValueTracking = nullptr);
/// Returns true if \p Val can be assumed to never be a NaN. If \p SNaN is true,
/// this returns if \p Val can be assumed to never be a signaling NaN.
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 58ac87206b9a6..29bf1d467ae0e 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -73,7 +73,7 @@ class FastISel;
class FunctionLoweringInfo;
class GlobalValue;
class Loop;
-class GISelKnownBits;
+class GISelValueTracking;
class IntrinsicInst;
class IRBuilderBase;
struct KnownBits;
@@ -4159,7 +4159,7 @@ class TargetLowering : public TargetLoweringBase {
/// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
/// argument allows us to only collect the known bits that are shared by the
/// requested vector elements. This is for GISel.
- virtual void computeKnownBitsForTargetInstr(GISelKnownBits &Analysis,
+ virtual void computeKnownBitsForTargetInstr(GISelValueTracking &Analysis,
Register R, KnownBits &Known,
const APInt &DemandedElts,
const MachineRegisterInfo &MRI,
@@ -4169,7 +4169,7 @@ class TargetLowering : public TargetLoweringBase {
/// typically be inferred from the number of low known 0 bits. However, for a
/// pointer with a non-integral address space, the alignment value may be
/// independent from the known low bits.
- virtual Align computeKnownAlignForTargetInstr(GISelKnownBits &Analysis,
+ virtual Align computeKnownAlignForTargetInstr(GISelValueTracking &Analysis,
Register R,
const MachineRegisterInfo &MRI,
unsigned Depth = 0) const;
@@ -4194,11 +4194,9 @@ class TargetLowering : public TargetLoweringBase {
/// information about sign bits to GlobalISel combiners. The DemandedElts
/// argument allows us to only collect the minimum sign bits that are shared
/// by the requested vector elements.
- virtual unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis,
- Register R,
- const APInt &DemandedElts,
- const MachineRegisterInfo &MRI,
- unsigned Depth = 0) const;
+ virtual unsigned computeNumSignBitsForTargetInstr(
+ GISelValueTracking &Analysis, Register R, const APInt &DemandedElts,
+ const MachineRegisterInfo &MRI, unsigned Depth = 0) const;
/// Attempt to simplify any target nodes based on the demanded vector
/// elements, returning true on success. Otherwise, analyze the expression and
diff --git a/llvm/include/llvm/InitializePasses.h b/llvm/include/llvm/InitializePasses.h
index e820277724393..8363fba8b1f3a 100644
--- a/llvm/include/llvm/InitializePasses.h
+++ b/llvm/include/llvm/InitializePasses.h
@@ -152,7 +152,7 @@ void initializeLazyValueInfoWrapperPassPass(PassRegistry &);
void initializeLegacyLICMPassPass(PassRegistry &);
void initializeLegalizerPass(PassRegistry &);
void initializeGISelCSEAnalysisWrapperPassPass(PassRegistry &);
-void initializeGISelKnownBitsAnalysisPass(PassRegistry &);
+void initializeGISelValueTrackingAnalysisPass(PassRegistry &);
void initializeLiveDebugValuesLegacyPass(PassRegistry &);
void initializeLiveDebugVariablesWrapperLegacyPass(PassRegistry &);
void initializeLiveIntervalsWrapperPassPass(PassRegistry &);
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 660b03080f92e..1a967fe56b7b0 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -271,7 +271,7 @@ def sext_inreg_to_zext_inreg : GICombineRule<
(G_SEXT_INREG $dst, $src, $imm):$root,
[{
unsigned BitWidth = MRI.getType(${src}.getReg()).getScalarSizeInBits();
- return Helper.getKnownBits()->maskedValueIsZero(${src}.getReg(),
+ return Helper.getValueTracking()->maskedValueIsZero(${src}.getReg(),
APInt::getOneBitSet(BitWidth, ${imm}.getImm() - 1)); }]),
(apply [{
Helper.getBuilder().setInstrAndDebugLoc(*${root});
diff --git a/llvm/lib/CodeGen/GlobalISel/CMakeLists.txt b/llvm/lib/CodeGen/GlobalISel/CMakeLists.txt
index a45024d120be6..554a2367eb835 100644
--- a/llvm/lib/CodeGen/GlobalISel/CMakeLists.txt
+++ b/llvm/lib/CodeGen/GlobalISel/CMakeLists.txt
@@ -1,6 +1,6 @@
add_llvm_component_library(LLVMGlobalISel
CSEInfo.cpp
- GISelKnownBits.cpp
+ GISelValueTracking.cpp
CSEMIRBuilder.cpp
CallLowering.cpp
GlobalISel.cpp
diff --git a/llvm/lib/CodeGen/GlobalISel/Combiner.cpp b/llvm/lib/CodeGen/GlobalISel/Combiner.cpp
index c5ec73cd5c65d..733ac41b8c7a1 100644
--- a/llvm/lib/CodeGen/GlobalISel/Combiner.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Combiner.cpp
@@ -222,7 +222,7 @@ Combiner::WorkListMaintainer::create(Level Lvl, WorkListTy &WorkList,
}
Combiner::Combiner(MachineFunction &MF, CombinerInfo &CInfo,
- const TargetPassConfig *TPC, GISelKnownBits *KB,
+ const TargetPassConfig *TPC, GISelValueTracking *VT,
GISelCSEInfo *CSEInfo)
: Builder(CSEInfo ? std::make_unique<CSEMIRBuilder>()
: std::make_unique<MachineIRBuilder>()),
@@ -230,7 +230,7 @@ Combiner::Combiner(MachineFunction &MF, CombinerInfo &CInfo,
MF.getRegInfo())),
ObserverWrapper(std::make_unique<GISelObserverWrapper>()), CInfo(CInfo),
Observer(*ObserverWrapper), B(*Builder), MF(MF), MRI(MF.getRegInfo()),
- KB(KB), TPC(TPC), CSEInfo(CSEInfo) {
+ VT(VT), TPC(TPC), CSEInfo(CSEInfo) {
(void)this->TPC; // FIXME: Remove when used.
// Setup builder.
@@ -263,7 +263,7 @@ bool Combiner::combineMachineInstrs() {
// uninitialized at that time.
if (!HasSetupMF) {
HasSetupMF = true;
- setupMF(MF, KB);
+ setupMF(MF, VT);
}
LLVM_DEBUG(dbgs() << "Generic MI Combiner for: " << MF.getName() << '\n');
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 3a5da379a9c49..bab0c1596ca40 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -12,7 +12,7 @@
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
@@ -55,13 +55,14 @@ static cl::opt<bool>
CombinerHelper::CombinerHelper(GISelChangeObserver &Observer,
MachineIRBuilder &B, bool IsPreLegalize,
- GISelKnownBits *KB, MachineDominatorTree *MDT,
+ GISelValueTracking *VT,
+ MachineDominatorTree *MDT,
const LegalizerInfo *LI)
- : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer), KB(KB),
+ : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer), VT(VT),
MDT(MDT), IsPreLegalize(IsPreLegalize), LI(LI),
RBI(Builder.getMF().getSubtarget().getRegBankInfo()),
TRI(Builder.getMF().getSubtarget().getRegisterInfo()) {
- (void)this->KB;
+ (void)this->VT;
}
const TargetLowering &CombinerHelper::getTargetLowering() const {
@@ -2081,7 +2082,7 @@ bool CombinerHelper::matchCombineSubToAdd(MachineInstr &MI,
// shl ([sza]ext x), y => zext (shl x, y), if shift does not overflow source
bool CombinerHelper::matchCombineShlOfExtend(MachineInstr &MI,
RegisterImmPair &MatchData) const {
- assert(MI.getOpcode() == TargetOpcode::G_SHL && KB);
+ assert(MI.getOpcode() == TargetOpcode::G_SHL && VT);
if (!getTargetLowering().isDesirableToPullExtFromShl(MI))
return false;
@@ -2114,7 +2115,7 @@ bool CombinerHelper::matchCombineShlOfExtend(MachineInstr &MI,
MatchData.Reg = ExtSrc;
MatchData.Imm = ShiftAmt;
- unsigned MinLeadingZeros = KB->getKnownZeroes(ExtSrc).countl_one();
+ unsigned MinLeadingZeros = VT->getKnownZeroes(ExtSrc).countl_one();
unsigned SrcTySize = MRI.getType(ExtSrc).getScalarSizeInBits();
return MinLeadingZeros >= ShiftAmt && ShiftAmt < SrcTySize;
}
@@ -2582,7 +2583,7 @@ bool CombinerHelper::matchCombineZextTrunc(MachineInstr &MI,
canReplaceReg(DstReg, Reg, MRI)) {
unsigned DstSize = DstTy.getScalarSizeInBits();
unsigned SrcSize = MRI.getType(SrcReg).getScalarSizeInBits();
- return KB->getKnownBits(Reg).countMinLeadingZeros() >= DstSize - SrcSize;
+ return VT->getKnownBits(Reg).countMinLeadingZeros() >= DstSize - SrcSize;
}
return false;
}
@@ -2627,7 +2628,7 @@ bool CombinerHelper::matchCombineTruncOfShift(
NewShiftTy = DstTy;
// Make sure new shift amount is legal.
- KnownBits Known = KB->getKnownBits(SrcMI->getOperand(2).getReg());
+ KnownBits Known = VT->getKnownBits(SrcMI->getOperand(2).getReg());
if (Known.getMaxValue().uge(NewShiftTy.getScalarSizeInBits()))
return false;
break;
@@ -2648,7 +2649,7 @@ bool CombinerHelper::matchCombineTruncOfShift(
return false;
// Make sure we won't lose information by truncating the high bits.
- KnownBits Known = KB->getKnownBits(SrcMI->getOperand(2).getReg());
+ KnownBits Known = VT->getKnownBits(SrcMI->getOperand(2).getReg());
if (Known.getMaxValue().ugt(NewShiftTy.getScalarSizeInBits() -
DstTy.getScalarSizeInBits()))
return false;
@@ -2958,7 +2959,7 @@ bool CombinerHelper::matchOperandIsUndef(MachineInstr &MI,
bool CombinerHelper::matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI,
unsigned OpIdx) const {
MachineOperand &MO = MI.getOperand(OpIdx);
- return isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB);
+ return isKnownToBeAPowerOfTwo(MO.getReg(), MRI, VT);
}
void CombinerHelper::replaceInstWithFConstant(MachineInstr &MI,
@@ -3284,7 +3285,7 @@ bool CombinerHelper::matchRedundantAnd(MachineInstr &MI,
//
// In this case, G_ICMP only produces a single bit, so x & 1 == x.
assert(MI.getOpcode() == TargetOpcode::G_AND);
- if (!KB)
+ if (!VT)
return false;
Register AndDst = MI.getOperand(0).getReg();
@@ -3294,11 +3295,11 @@ bool CombinerHelper::matchRedundantAnd(MachineInstr &MI,
// Check the RHS (maybe a constant) first, and if we have no KnownBits there,
// we can't do anything. If we do, then it depends on whether we have
// KnownBits on the LHS.
- KnownBits RHSBits = KB->getKnownBits(RHS);
+ KnownBits RHSBits = VT->getKnownBits(RHS);
if (RHSBits.isUnknown())
return false;
- KnownBits LHSBits = KB->getKnownBits(LHS);
+ KnownBits LHSBits = VT->getKnownBits(LHS);
// Check that x & Mask == x.
// x & 1 == x, always
@@ -3332,15 +3333,15 @@ bool CombinerHelper::matchRedundantOr(MachineInstr &MI,
//
// Eliminate the G_OR when it is known that x | y == x or x | y == y.
assert(MI.getOpcode() == TargetOpcode::G_OR);
- if (!KB)
+ if (!VT)
return false;
Register OrDst = MI.getOperand(0).getReg();
Register LHS = MI.getOperand(1).getReg();
Register RHS = MI.getOperand(2).getReg();
- KnownBits LHSBits = KB->getKnownBits(LHS);
- KnownBits RHSBits = KB->getKnownBits(RHS);
+ KnownBits LHSBits = VT->getKnownBits(LHS);
+ KnownBits RHSBits = VT->getKnownBits(RHS);
// Check that x | Mask == x.
// x | 0 == x, always
@@ -3369,7 +3370,7 @@ bool CombinerHelper::matchRedundantSExtInReg(MachineInstr &MI) const {
Register Src = MI.getOperand(1).getReg();
unsigned ExtBits = MI.getOperand(2).getImm();
unsigned TypeSize = MRI.getType(Src).getScalarSizeInBits();
- return KB->computeNumSignBits(Src) >= (TypeSize - ExtBits + 1);
+ return VT->computeNumSignBits(Src) >= (TypeSize - ExtBits + 1);
}
static bool isConstValidTrue(const TargetLowering &TLI, unsigned ScalarSizeBits,
@@ -4453,7 +4454,7 @@ bool CombinerHelper::matchICmpToTrueFalseKnownBits(MachineInstr &MI,
// we cannot do any transforms so we can safely bail out early.
// - The RHS is zero: we don't need to know the LHS to do unsigned <0 and
// >=0.
- auto KnownRHS = KB->getKnownBits(MI.getOperand(3).getReg());
+ auto KnownRHS = VT->getKnownBits(MI.getOperand(3).getReg());
if (KnownRHS.isUnknown())
return false;
@@ -4468,7 +4469,7 @@ bool CombinerHelper::matchICmpToTrueFalseKnownBits(MachineInstr &MI,
}
if (!KnownVal) {
- auto KnownLHS = KB->getKnownBits(MI.getOperand(2).getReg());
+ auto KnownLHS = VT->getKnownBits(MI.getOperand(2).getReg());
KnownVal = ICmpInst::compare(KnownLHS, KnownRHS, Pred);
}
@@ -4511,7 +4512,7 @@ bool CombinerHelper::matchICmpToLHSKnownBits(
if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICst(OneOrZero)))
return false;
Register LHS = MI.getOperand(2).getReg();
- auto KnownLHS = KB->getKnownBits(LHS);
+ auto KnownLHS = VT->getKnownBits(LHS);
if (KnownLHS.getMinValue() != 0 || KnownLHS.getMaxValue() != 1)
return false;
// Make sure replacing Dst with the LHS is a legal operation.
@@ -5309,7 +5310,7 @@ MachineInstr *CombinerHelper::buildUDivUsingMul(MachineInstr &MI) const {
}
unsigned KnownLeadingZeros =
- KB ? KB->getKnownBits(LHS).countMinLeadingZeros() : 0;
+ VT ? VT->getKnownBits(LHS).countMinLeadingZeros() : 0;
bool UseNPQ = false;
SmallVector<Register, 16> PreShifts, PostShifts, MagicFactors, NPQFactors;
@@ -6645,7 +6646,7 @@ bool CombinerHelper::matchShiftsTooBig(
MatchInfo = std::nullopt;
return true;
}
- auto OptMaxUsefulShift = getMinUselessShift(KB->getKnownBits(ShiftVal),
+ auto OptMaxUsefulShift = getMinUselessShift(VT->getKnownBits(ShiftVal),
MI.getOpcode(), MatchInfo);
return OptMaxUsefulShift && CI->uge(*OptMaxUsefulShift);
};
@@ -7518,9 +7519,9 @@ bool CombinerHelper::matchAddOverflow(MachineInstr &MI,
// We try to combine uaddo to non-overflowing add.
if (!IsSigned) {
ConstantRange CRLHS =
- ConstantRange::fromKnownBits(KB->getKnownBits(LHS), /*IsSigned=*/false);
+ ConstantRange::fromKnownBits(VT->getKnownBits(LHS), /*IsSigned=*/false);
ConstantRange CRRHS =
- ConstantRange::fromKnownBits(KB->getKnownBits(RHS), /*IsSigned=*/false);
+ ConstantRange::fromKnownBits(VT->getKnownBits(RHS), /*IsSigned=*/false);
switch (CRLHS.unsignedAddMayOverflow(CRRHS)) {
case ConstantRange::OverflowResult::MayOverflow:
@@ -7548,7 +7549,7 @@ bool CombinerHelper::matchAddOverflow(MachineInstr &MI,
// If LHS and RHS each have at least two sign bits, then there is no signed
// overflow.
- if (KB->computeNumSignBits(RHS) > 1 && KB->computeNumSignBits(LHS) > 1) {
+ if (VT->computeNumSignBits(RHS) > 1 && VT->computeNumSignBits(LHS) > 1) {
MatchInfo = [=](MachineIRBuilder &B) {
B.buildAdd(Dst, LHS, RHS, MachineInstr::MIFlag::NoSWrap);
B.buildConstant(Carry, 0);
@@ -7557,9 +7558,9 @@ bool CombinerHelper::matchAddOverflow(MachineInstr &MI,
}
ConstantRange CRLHS =
- ConstantRange::fromKnownBits(KB->getKnownBits(LHS), /*IsSigned=*/true);
+ ConstantRange::fromKnownBits(VT->getKnownBits(LHS), /*IsSigned=*/true);
ConstantRange CRRHS =
- ConstantRange::fromKnownBits(KB->getKnownBits(RHS), /*IsSigned=*/true);
+ ConstantRange::fromKnownBits(VT->getKnownBits(RHS), /*IsSigned=*/true);
switch (CRLHS.signedAddMayOverflow(CRRHS)) {
case ConstantRange::OverflowResult::MayOverflow:
@@ -7951,10 +7952,10 @@ bool CombinerHelper::matchSuboCarryOut(const MachineInstr &MI,
return false;
ConstantRange KBLHS =
- ConstantRange::fromKnownBits(KB->getKnownBits(LHS),
+ ConstantRange::fromKnownBits(VT->getKnownBits(LHS),
/* IsSigned=*/Subo->isSigned());
ConstantRange KBRHS =
- ConstantRange::fromKnownBits(KB->getKnownBits(RHS),
+ ConstantRange::fromKnownBits(VT->getKnownBits(RHS),
/* IsSigned=*/Subo->isSigned());
if (Subo->isSigned()) {
diff --git a/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp b/llvm/lib/CodeGen/GlobalISel/GISelValueTracking.cpp
similarity index 90%
rename from llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
rename to llvm/lib/CodeGen/GlobalISel/GISelValueTracking.cpp
index 6c15ed3423d3b..12fe28b29e5c8 100644
--- a/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/GISelValueTracking.cpp
@@ -1,4 +1,4 @@
-//===- lib/CodeGen/GlobalISel/GISelKnownBits.cpp --------------*- C++ *-===//
+//===- lib/CodeGen/GlobalISel/GISelValueTracking.cpp ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -10,7 +11,7 @@
/// passes.
//
//===----------------------------------------------------------------------===//
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
@@ -26,16 +27,16 @@
using namespace llvm;
-char llvm::GISelKnownBitsAnalysis::ID = 0;
+char llvm::GISelValueTrackingAnalysis::ID = 0;
-INITIALIZE_PASS(GISelKnownBitsAnalysis, DEBUG_TYPE,
+INITIALIZE_PASS(GISelValueTrackingAnalysis, DEBUG_TYPE,
"Analysis for ComputingKnownBits", false, true)
-GISelKnownBits::GISelKnownBits(MachineFunction &MF, unsigned MaxDepth)
+GISelValueTracking::GISelValueTracking(MachineFunction &MF, unsigned MaxDepth)
: MF(MF), MRI(MF.getRegInfo()), TL(*MF.getSubtarget().getTargetLowering()),
DL(MF.getFunction().getDataLayout()), MaxDepth(MaxDepth) {}
-Align GISelKnownBits::computeKnownAlignment(Register R, unsigned Depth) {
+Align GISelValueTracking::computeKnownAlignment(Register R, unsigned Depth) {
const MachineInstr *MI = MRI.getVRegDef(R);
switch (MI->getOpcode()) {
case TargetOpcode::COPY:
@@ -57,13 +58,13 @@ Align GISelKnownBits::computeKnownAlignment(Register R, unsigned Depth) {
}
}
-KnownBits GISelKnownBits::getKnownBits(MachineInstr &MI) {
+KnownBits GISelValueTracking::getKnownBits(MachineInstr &MI) {
assert(MI.getNumExplicitDefs() == 1 &&
"expected single return generic instruction");
return getKnownBits(MI.getOperand(0).getReg());
}
-KnownBits GISelKnownBits::getKnownBits(Register R) {
+KnownBits GISelValueTracking::getKnownBits(Register R) {
const LLT Ty = MRI.getType(R);
// Since the number of lanes in a scalable vector is unknown at compile time,
// we track one bit which is implicitly broadcast to all lanes. This means
@@ -73,8 +74,9 @@ KnownBits GISelKnownBits::getKnownBits(Register R) {
return getKnownBits(R, DemandedElts);
}
-KnownBits GISelKnownBits::getKnownBits(Register R, const APInt &DemandedElts,
- unsigned Depth) {
+KnownBits GISelValueTracking::getKnownBits(Register R,
+ const APInt &DemandedElts,
+ unsigned Depth) {
// For now, we only maintain the cache during one request.
assert(ComputeKnownBitsCache.empty() && "Cache should have been cleared");
@@ -84,17 +86,19 @@ KnownBits GISelKnownBits::getKnownBits(Register R, const APInt &DemandedElts,
return Known;
}
-bool GISelKnownBits::signBitIsZero(Register R) {
+bool GISelValueTracking::signBitIsZero(Register R) {
LLT Ty = MRI.getType(R);
unsigned BitWidth = Ty.getScalarSizeInBits();
return maskedValueIsZero(R, APInt::getSignMask(BitWidth));
}
-APInt GISelKnownBits::getKnownZeroes(Register R) {
+APInt GISelValueTracking::getKnownZeroes(Register R) {
return getKnownBits(R).Zero;
}
-APInt GISelKnownBits::getKnownOnes(Register R) { return getKnownBits(R).One; }
+APInt GISelValueTracking::getKnownOnes(Register R) {
+ return getKnownBits(R).One;
+}
LLVM_ATTRIBUTE_UNUSED static void
dumpResult(const MachineInstr &MI, const KnownBits &Known, unsigned Depth) {
@@ -108,10 +112,10 @@ dumpResult(const MachineInstr &MI, const KnownBits &Known, unsigned Depth) {
}
/// Compute known bits for the intersection of \p Src0 and \p Src1
-void GISelKnownBits::computeKnownBitsMin(Register Src0, Register Src1,
- KnownBits &Known,
- const APInt &DemandedElts,
- unsigned Depth) {
+void GISelValueTracking::computeKnownBitsMin(Register Src0, Register Src1,
+ KnownBits &Known,
+ const APInt &DemandedElts,
+ unsigned Depth) {
// Test src1 first, since we canonicalize simpler expressions to the RHS.
computeKnownBitsImpl(Src1, Known, DemandedElts, Depth);
@@ -140,9 +144,9 @@ static KnownBits extractBits(unsigned BitWidth, const KnownBits &SrcOpKnown,
return KnownBits::lshr(SrcOpKnown, OffsetKnown) & Mask;
}
-void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
- const APInt &DemandedElts,
- unsigned Depth) {
+void GISelValueTracking::computeKnownBitsImpl(Register R, KnownBits &Known,
+ const APInt &DemandedElts,
+ unsigned Depth) {
MachineInstr &MI = *MRI.getVRegDef(R);
unsigned Opcode = MI.getOpcode();
LLT DstTy = MRI.getType(R);
@@ -178,12 +182,12 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
Known = KnownBits(BitWidth); // Don't know anything
// Depth may get bigger than max depth if it gets passed to a different
- // GISelKnownBits object.
- // This may happen when say a generic part uses a GISelKnownBits object
+ // GISelValueTracking object.
+ // This may happen when say a generic part uses a GISelValueTracking object
// with some max depth, but then we hit TL.computeKnownBitsForTargetInstr
- // which creates a new GISelKnownBits object with a different and smaller
+ // which creates a new GISelValueTracking object with a different and smaller
// depth. If we just check for equality, we would never exit if the depth
- // that is passed down to the target specific GISelKnownBits object is
+ // that is passed down to the target specific GISelValueTracking object is
// already bigger than its max depth.
if (Depth >= getMaxDepth())
return;
@@ -200,7 +204,8 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
break;
case TargetOpcode::G_BUILD_VECTOR: {
// Collect the known bits that are shared by every demanded vector element.
- Known.Zero.setAllBits(); Known.One.setAllBits();
+ Known.Zero.setAllBits();
+ Known.One.setAllBits();
for (unsigned i = 0, e = MI.getNumOperands() - 1; i < e; ++i) {
if (!DemandedElts[i])
continue;
@@ -365,19 +370,19 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
}
case TargetOpcode::G_UMIN: {
KnownBits KnownRHS;
- computeKnownBitsImpl(MI.getOperand(1).getReg(), Known,
- DemandedElts, Depth + 1);
- computeKnownBitsImpl(MI.getOperand(2).getReg(), KnownRHS,
- DemandedElts, Depth + 1);
+ computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
+ Depth + 1);
+ computeKnownBitsImpl(MI.getOperand(2).getReg(), KnownRHS, DemandedElts,
+ Depth + 1);
Known = KnownBits::umin(Known, KnownRHS);
break;
}
case TargetOpcode::G_UMAX: {
KnownBits KnownRHS;
- computeKnownBitsImpl(MI.getOperand(1).getReg(), Known,
- DemandedElts, Depth + 1);
- computeKnownBitsImpl(MI.getOperand(2).getReg(), KnownRHS,
- DemandedElts, Depth + 1);
+ computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
+ Depth + 1);
+ computeKnownBitsImpl(MI.getOperand(2).getReg(), KnownRHS, DemandedElts,
+ Depth + 1);
Known = KnownBits::umax(Known, KnownRHS);
break;
}
@@ -557,8 +562,8 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
case TargetOpcode::G_CTPOP: {
computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts,
Depth + 1);
- // We can bound the space the count needs. Also, bits known to be zero can't
- // contribute to the population.
+ // We can bound the space the count needs. Also, bits known to be zero
+ // can't contribute to the population.
unsigned BitsPossiblySet = Known2.countMaxPopulation();
unsigned LowBits = llvm::bit_width(BitsPossiblySet);
Known.Zero.setBitsFrom(LowBits);
@@ -633,9 +638,9 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
}
/// Compute number of sign bits for the intersection of \p Src0 and \p Src1
-unsigned GISelKnownBits::computeNumSignBitsMin(Register Src0, Register Src1,
- const APInt &DemandedElts,
- unsigned Depth) {
+unsigned GISelValueTracking::computeNumSignBitsMin(Register Src0, Register Src1,
+ const APInt &DemandedElts,
+ unsigned Depth) {
// Test src1 first, since we canonicalize simpler expressions to the RHS.
unsigned Src1SignBits = computeNumSignBits(Src1, DemandedElts, Depth);
if (Src1SignBits == 1)
@@ -670,9 +675,9 @@ static unsigned computeNumSignBitsFromRangeMetadata(const GAnyLoad *Ld,
CR.getSignedMax().getNumSignBits());
}
-unsigned GISelKnownBits::computeNumSignBits(Register R,
- const APInt &DemandedElts,
- unsigned Depth) {
+unsigned GISelValueTracking::computeNumSignBits(Register R,
+ const APInt &DemandedElts,
+ unsigned Depth) {
MachineInstr &MI = *MRI.getVRegDef(R);
unsigned Opcode = MI.getOpcode();
@@ -719,7 +724,8 @@ unsigned GISelKnownBits::computeNumSignBits(Register R,
Register Src = MI.getOperand(1).getReg();
unsigned SrcBits = MI.getOperand(2).getImm();
unsigned InRegBits = TyBits - SrcBits + 1;
- return std::max(computeNumSignBits(Src, DemandedElts, Depth + 1), InRegBits);
+ return std::max(computeNumSignBits(Src, DemandedElts, Depth + 1),
+ InRegBits);
}
case TargetOpcode::G_LOAD: {
GLoad *Ld = cast<GLoad>(&MI);
@@ -836,7 +842,7 @@ unsigned GISelKnownBits::computeNumSignBits(Register R,
case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
default: {
unsigned NumBits =
- TL.computeNumSignBitsForTargetInstr(*this, R, DemandedElts, MRI, Depth);
+ TL.computeNumSignBitsForTargetInstr(*this, R, DemandedElts, MRI, Depth);
if (NumBits > 1)
FirstAnswer = std::max(FirstAnswer, NumBits);
break;
@@ -847,9 +853,9 @@ unsigned GISelKnownBits::computeNumSignBits(Register R,
// use this information.
KnownBits Known = getKnownBits(R, DemandedElts, Depth);
APInt Mask;
- if (Known.isNonNegative()) { // sign bit is 0
+ if (Known.isNonNegative()) { // sign bit is 0
Mask = Known.Zero;
- } else if (Known.isNegative()) { // sign bit is 1;
+ } else if (Known.isNegative()) { // sign bit is 1;
Mask = Known.One;
} else {
// Nothing known.
@@ -862,27 +868,27 @@ unsigned GISelKnownBits::computeNumSignBits(Register R,
return std::max(FirstAnswer, Mask.countl_one());
}
-unsigned GISelKnownBits::computeNumSignBits(Register R, unsigned Depth) {
+unsigned GISelValueTracking::computeNumSignBits(Register R, unsigned Depth) {
LLT Ty = MRI.getType(R);
APInt DemandedElts =
Ty.isVector() ? APInt::getAllOnes(Ty.getNumElements()) : APInt(1, 1);
return computeNumSignBits(R, DemandedElts, Depth);
}
-void GISelKnownBitsAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
+void GISelValueTrackingAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
MachineFunctionPass::getAnalysisUsage(AU);
}
-bool GISelKnownBitsAnalysis::runOnMachineFunction(MachineFunction &MF) {
+bool GISelValueTrackingAnalysis::runOnMachineFunction(MachineFunction &MF) {
return false;
}
-GISelKnownBits &GISelKnownBitsAnalysis::get(MachineFunction &MF) {
+GISelValueTracking &GISelValueTrackingAnalysis::get(MachineFunction &MF) {
if (!Info) {
unsigned MaxDepth =
MF.getTarget().getOptLevel() == CodeGenOptLevel::None ? 2 : 6;
- Info = std::make_unique<GISelKnownBits>(MF, MaxDepth);
+ Info = std::make_unique<GISelValueTracking>(MF, MaxDepth);
}
return *Info;
}
diff --git a/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp b/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
index 9185a7d1eca91..5842f204febf2 100644
--- a/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
@@ -16,7 +16,7 @@
#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
@@ -56,7 +56,7 @@ INITIALIZE_PASS_BEGIN(InstructionSelect, DEBUG_TYPE,
"Select target instructions out of generic instructions",
false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
-INITIALIZE_PASS_DEPENDENCY(GISelKnownBitsAnalysis)
+INITIALIZE_PASS_DEPENDENCY(GISelValueTrackingAnalysis)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LazyBlockFrequencyInfoPass)
INITIALIZE_PASS_END(InstructionSelect, DEBUG_TYPE,
@@ -120,8 +120,8 @@ class InstructionSelect::MIIteratorMaintainer : public GISelChangeObserver {
void InstructionSelect::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetPassConfig>();
- AU.addRequired<GISelKnownBitsAnalysis>();
- AU.addPreserved<GISelKnownBitsAnalysis>();
+ AU.addRequired<GISelValueTrackingAnalysis>();
+ AU.addPreserved<GISelValueTrackingAnalysis>();
if (OptLevel != CodeGenOptLevel::None) {
AU.addRequired<ProfileSummaryInfoWrapperPass>();
@@ -146,7 +146,7 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
OptLevel = MF.getFunction().hasOptNone() ? CodeGenOptLevel::None
: MF.getTarget().getOptLevel();
- KB = &getAnalysis<GISelKnownBitsAnalysis>().get(MF);
+ VT = &getAnalysis<GISelValueTrackingAnalysis>().get(MF);
if (OptLevel != CodeGenOptLevel::None) {
PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
if (PSI && PSI->hasProfileSummary())
@@ -162,7 +162,7 @@ bool InstructionSelect::selectMachineFunction(MachineFunction &MF) {
const TargetPassConfig &TPC = *ISel->TPC;
CodeGenCoverage CoverageInfo;
- ISel->setupMF(MF, KB, &CoverageInfo, PSI, BFI);
+ ISel->setupMF(MF, VT, &CoverageInfo, PSI, BFI);
// An optimization remark emitter. Used to report failures.
MachineOptimizationRemarkEmitter MORE(MF, /*MBFI=*/nullptr);
diff --git a/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp b/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp
index ef1c54e6cef13..e4bce16f230b8 100644
--- a/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp
@@ -18,7 +18,7 @@
#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/GISelWorkList.h"
#include "llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
@@ -75,7 +75,7 @@ INITIALIZE_PASS_BEGIN(Legalizer, DEBUG_TYPE,
false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(GISelKnownBitsAnalysis)
+INITIALIZE_PASS_DEPENDENCY(GISelValueTrackingAnalysis)
INITIALIZE_PASS_END(Legalizer, DEBUG_TYPE,
"Legalize the Machine IR a function's Machine IR", false,
false)
@@ -86,8 +86,8 @@ void Legalizer::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetPassConfig>();
AU.addRequired<GISelCSEAnalysisWrapperPass>();
AU.addPreserved<GISelCSEAnalysisWrapperPass>();
- AU.addRequired<GISelKnownBitsAnalysis>();
- AU.addPreserved<GISelKnownBitsAnalysis>();
+ AU.addRequired<GISelValueTrackingAnalysis>();
+ AU.addPreserved<GISelValueTrackingAnalysis>();
getSelectionDAGFallbackAnalysisUsage(AU);
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -177,7 +177,7 @@ Legalizer::legalizeMachineFunction(MachineFunction &MF, const LegalizerInfo &LI,
ArrayRef<GISelChangeObserver *> AuxObservers,
LostDebugLocObserver &LocObserver,
MachineIRBuilder &MIRBuilder,
- GISelKnownBits *KB) {
+ GISelValueTracking *VT) {
MIRBuilder.setMF(MF);
MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -216,8 +216,8 @@ Legalizer::legalizeMachineFunction(MachineFunction &MF, const LegalizerInfo &LI,
// Now install the observer as the delegate to MF.
// This will keep all the observers notified about new insertions/deletions.
RAIIMFObsDelInstaller Installer(MF, WrapperObserver);
- LegalizerHelper Helper(MF, LI, WrapperObserver, MIRBuilder, KB);
- LegalizationArtifactCombiner ArtCombiner(MIRBuilder, MRI, LI, KB);
+ LegalizerHelper Helper(MF, LI, WrapperObserver, MIRBuilder, VT);
+ LegalizationArtifactCombiner ArtCombiner(MIRBuilder, MRI, LI, VT);
bool Changed = false;
SmallVector<MachineInstr *, 128> RetryList;
do {
@@ -341,11 +341,11 @@ bool Legalizer::runOnMachineFunction(MachineFunction &MF) {
AuxObservers.push_back(&LocObserver);
// This allows Known Bits Analysis in the legalizer.
- GISelKnownBits *KB = &getAnalysis<GISelKnownBitsAnalysis>().get(MF);
+ GISelValueTracking *VT = &getAnalysis<GISelValueTrackingAnalysis>().get(MF);
const LegalizerInfo &LI = *MF.getSubtarget().getLegalizerInfo();
MFResult Result = legalizeMachineFunction(MF, LI, AuxObservers, LocObserver,
- *MIRBuilder, KB);
+ *MIRBuilder, VT);
if (Result.FailedOn) {
reportGISelFailure(MF, TPC, MORE, "gisel-legalize",
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index a9f80860124fb..c28f3c5518301 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -15,7 +15,7 @@
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/GlobalISel/LostDebugLocObserver.h"
@@ -107,13 +107,13 @@ LegalizerHelper::LegalizerHelper(MachineFunction &MF,
MachineIRBuilder &Builder)
: MIRBuilder(Builder), Observer(Observer), MRI(MF.getRegInfo()),
LI(*MF.getSubtarget().getLegalizerInfo()),
- TLI(*MF.getSubtarget().getTargetLowering()), KB(nullptr) {}
+ TLI(*MF.getSubtarget().getTargetLowering()), VT(nullptr) {}
LegalizerHelper::LegalizerHelper(MachineFunction &MF, const LegalizerInfo &LI,
GISelChangeObserver &Observer,
- MachineIRBuilder &B, GISelKnownBits *KB)
+ MachineIRBuilder &B, GISelValueTracking *VT)
: MIRBuilder(B), Observer(Observer), MRI(MF.getRegInfo()), LI(LI),
- TLI(*MF.getSubtarget().getTargetLowering()), KB(KB) {}
+ TLI(*MF.getSubtarget().getTargetLowering()), VT(VT) {}
LegalizerHelper::LegalizeResult
LegalizerHelper::legalizeInstrStep(MachineInstr &MI,
diff --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
index 625d556e3ff5e..223d69c362185 100644
--- a/llvm/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -15,7 +15,7 @@
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LostDebugLocObserver.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
@@ -1094,7 +1094,7 @@ llvm::ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2,
}
bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
- GISelKnownBits *KB) {
+ GISelValueTracking *VT) {
std::optional<DefinitionAndSourceRegister> DefSrcReg =
getDefSrcRegIgnoringCopies(Reg, MRI);
if (!DefSrcReg)
@@ -1133,7 +1133,7 @@ bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
// TODO: Probably should have a recursion depth guard since you could have
// bitcasted vector elements.
for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
- if (!isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB))
+ if (!isKnownToBeAPowerOfTwo(MO.getReg(), MRI, VT))
return false;
return true;
@@ -1154,14 +1154,14 @@ bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
break;
}
- if (!KB)
+ if (!VT)
return false;
// More could be done here, though the above checks are enough
// to handle some common cases.
// Fall back to computeKnownBits to catch other known cases.
- KnownBits Known = KB->getKnownBits(Reg);
+ KnownBits Known = VT->getKnownBits(Reg);
return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index f1649a3903fac..10006a9d76785 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -3779,7 +3779,7 @@ void TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
}
void TargetLowering::computeKnownBitsForTargetInstr(
- GISelKnownBits &Analysis, Register R, KnownBits &Known,
+ GISelValueTracking &Analysis, Register R, KnownBits &Known,
const APInt &DemandedElts, const MachineRegisterInfo &MRI,
unsigned Depth) const {
Known.resetAll();
@@ -3792,8 +3792,8 @@ void TargetLowering::computeKnownBitsForFrameIndex(
}
Align TargetLowering::computeKnownAlignForTargetInstr(
- GISelKnownBits &Analysis, Register R, const MachineRegisterInfo &MRI,
- unsigned Depth) const {
+ GISelValueTracking &Analysis, Register R, const MachineRegisterInfo &MRI,
+ unsigned Depth) const {
return Align(1);
}
@@ -3813,8 +3813,8 @@ unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
}
unsigned TargetLowering::computeNumSignBitsForTargetInstr(
- GISelKnownBits &Analysis, Register R, const APInt &DemandedElts,
- const MachineRegisterInfo &MRI, unsigned Depth) const {
+ GISelValueTracking &Analysis, Register R, const APInt &DemandedElts,
+ const MachineRegisterInfo &MRI, unsigned Depth) const {
return 1;
}
diff --git a/llvm/lib/Target/AArch64/AArch64Combine.td b/llvm/lib/Target/AArch64/AArch64Combine.td
index ce1980697abbb..7c395a9e01ee5 100644
--- a/llvm/lib/Target/AArch64/AArch64Combine.td
+++ b/llvm/lib/Target/AArch64/AArch64Combine.td
@@ -21,7 +21,7 @@ def icmp_redundant_trunc_matchdata : GIDefMatchData<"Register">;
def icmp_redundant_trunc : GICombineRule<
(defs root:$root, icmp_redundant_trunc_matchdata:$matchinfo),
(match (G_ICMP $dst, $tst, $src1, $src2):$root,
- [{ return matchICmpRedundantTrunc(*${root}, MRI, Helper.getKnownBits(), ${matchinfo}); }]),
+ [{ return matchICmpRedundantTrunc(*${root}, MRI, Helper.getValueTracking(), ${matchinfo}); }]),
(apply [{ applyICmpRedundantTrunc(*${root}, MRI, B, Observer, ${matchinfo}); }])>;
// AArch64-specific offset folding for G_GLOBAL_VALUE.
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index 67a08e39fe879..2afd24555b28c 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -75,10 +75,10 @@ class AArch64InstructionSelector : public InstructionSelector {
bool select(MachineInstr &I) override;
static const char *getName() { return DEBUG_TYPE; }
- void setupMF(MachineFunction &MF, GISelKnownBits *KB,
+ void setupMF(MachineFunction &MF, GISelValueTracking *VT,
CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
BlockFrequencyInfo *BFI) override {
- InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
+ InstructionSelector::setupMF(MF, VT, CoverageInfo, PSI, BFI);
MIB.setMF(MF);
// hasFnAttribute() is expensive to call on every BRCOND selection, so
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64O0PreLegalizerCombiner.cpp b/llvm/lib/Target/AArch64/GISel/AArch64O0PreLegalizerCombiner.cpp
index d76918b913984..4289066234420 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64O0PreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64O0PreLegalizerCombiner.cpp
@@ -17,7 +17,7 @@
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineDominators.h"
@@ -48,7 +48,7 @@ class AArch64O0PreLegalizerCombinerImpl : public Combiner {
public:
AArch64O0PreLegalizerCombinerImpl(
MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
- GISelKnownBits &KB, GISelCSEInfo *CSEInfo,
+ GISelValueTracking &VT, GISelCSEInfo *CSEInfo,
const AArch64O0PreLegalizerCombinerImplRuleConfig &RuleConfig,
const AArch64Subtarget &STI);
@@ -70,11 +70,11 @@ class AArch64O0PreLegalizerCombinerImpl : public Combiner {
AArch64O0PreLegalizerCombinerImpl::AArch64O0PreLegalizerCombinerImpl(
MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
- GISelKnownBits &KB, GISelCSEInfo *CSEInfo,
+ GISelValueTracking &VT, GISelCSEInfo *CSEInfo,
const AArch64O0PreLegalizerCombinerImplRuleConfig &RuleConfig,
const AArch64Subtarget &STI)
- : Combiner(MF, CInfo, TPC, &KB, CSEInfo),
- Helper(Observer, B, /*IsPreLegalize*/ true, &KB), RuleConfig(RuleConfig),
+ : Combiner(MF, CInfo, TPC, &VT, CSEInfo),
+ Helper(Observer, B, /*IsPreLegalize*/ true, &VT), RuleConfig(RuleConfig),
STI(STI),
#define GET_GICOMBINER_CONSTRUCTOR_INITS
#include "AArch64GenO0PreLegalizeGICombiner.inc"
@@ -135,8 +135,8 @@ void AArch64O0PreLegalizerCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetPassConfig>();
AU.setPreservesCFG();
getSelectionDAGFallbackAnalysisUsage(AU);
- AU.addRequired<GISelKnownBitsAnalysis>();
- AU.addPreserved<GISelKnownBitsAnalysis>();
+ AU.addRequired<GISelValueTrackingAnalysis>();
+ AU.addPreserved<GISelValueTrackingAnalysis>();
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -155,7 +155,7 @@ bool AArch64O0PreLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
auto &TPC = getAnalysis<TargetPassConfig>();
const Function &F = MF.getFunction();
- GISelKnownBits *KB = &getAnalysis<GISelKnownBitsAnalysis>().get(MF);
+ GISelValueTracking *VT = &getAnalysis<GISelValueTrackingAnalysis>().get(MF);
const AArch64Subtarget &ST = MF.getSubtarget<AArch64Subtarget>();
@@ -166,7 +166,7 @@ bool AArch64O0PreLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
// at the cost of possibly missing optimizations. See PR#94291 for details.
CInfo.MaxIterations = 1;
- AArch64O0PreLegalizerCombinerImpl Impl(MF, CInfo, &TPC, *KB,
+ AArch64O0PreLegalizerCombinerImpl Impl(MF, CInfo, &TPC, *VT,
/*CSEInfo*/ nullptr, RuleConfig, ST);
return Impl.combineMachineInstrs();
}
@@ -176,7 +176,7 @@ INITIALIZE_PASS_BEGIN(AArch64O0PreLegalizerCombiner, DEBUG_TYPE,
"Combine AArch64 machine instrs before legalization",
false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
-INITIALIZE_PASS_DEPENDENCY(GISelKnownBitsAnalysis)
+INITIALIZE_PASS_DEPENDENCY(GISelValueTrackingAnalysis)
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
INITIALIZE_PASS_END(AArch64O0PreLegalizerCombiner, DEBUG_TYPE,
"Combine AArch64 machine instrs before legalization", false,
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
index cf6b2ce9c5341..d4a14f8756304 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
@@ -28,7 +28,7 @@
#include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
@@ -447,7 +447,7 @@ class AArch64PostLegalizerCombinerImpl : public Combiner {
public:
AArch64PostLegalizerCombinerImpl(
MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
- GISelKnownBits &KB, GISelCSEInfo *CSEInfo,
+ GISelValueTracking &VT, GISelCSEInfo *CSEInfo,
const AArch64PostLegalizerCombinerImplRuleConfig &RuleConfig,
const AArch64Subtarget &STI, MachineDominatorTree *MDT,
const LegalizerInfo *LI);
@@ -468,12 +468,12 @@ class AArch64PostLegalizerCombinerImpl : public Combiner {
AArch64PostLegalizerCombinerImpl::AArch64PostLegalizerCombinerImpl(
MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
- GISelKnownBits &KB, GISelCSEInfo *CSEInfo,
+ GISelValueTracking &VT, GISelCSEInfo *CSEInfo,
const AArch64PostLegalizerCombinerImplRuleConfig &RuleConfig,
const AArch64Subtarget &STI, MachineDominatorTree *MDT,
const LegalizerInfo *LI)
- : Combiner(MF, CInfo, TPC, &KB, CSEInfo),
- Helper(Observer, B, /*IsPreLegalize*/ false, &KB, MDT, LI),
+ : Combiner(MF, CInfo, TPC, &VT, CSEInfo),
+ Helper(Observer, B, /*IsPreLegalize*/ false, &VT, MDT, LI),
RuleConfig(RuleConfig), STI(STI),
#define GET_GICOMBINER_CONSTRUCTOR_INITS
#include "AArch64GenPostLegalizeGICombiner.inc"
@@ -520,8 +520,8 @@ void AArch64PostLegalizerCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetPassConfig>();
AU.setPreservesCFG();
getSelectionDAGFallbackAnalysisUsage(AU);
- AU.addRequired<GISelKnownBitsAnalysis>();
- AU.addPreserved<GISelKnownBitsAnalysis>();
+ AU.addRequired<GISelValueTrackingAnalysis>();
+ AU.addPreserved<GISelValueTrackingAnalysis>();
if (!IsOptNone) {
AU.addRequired<MachineDominatorTreeWrapperPass>();
AU.addPreserved<MachineDominatorTreeWrapperPass>();
@@ -554,7 +554,7 @@ bool AArch64PostLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
const AArch64Subtarget &ST = MF.getSubtarget<AArch64Subtarget>();
const auto *LI = ST.getLegalizerInfo();
- GISelKnownBits *KB = &getAnalysis<GISelKnownBitsAnalysis>().get(MF);
+ GISelValueTracking *VT = &getAnalysis<GISelValueTrackingAnalysis>().get(MF);
MachineDominatorTree *MDT =
IsOptNone ? nullptr
: &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
@@ -570,7 +570,7 @@ bool AArch64PostLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
CInfo.ObserverLvl = CombinerInfo::ObserverLevel::SinglePass;
// Legalizer performs DCE, so a full DCE pass is unnecessary.
CInfo.EnableFullDCE = false;
- AArch64PostLegalizerCombinerImpl Impl(MF, CInfo, TPC, *KB, CSEInfo,
+ AArch64PostLegalizerCombinerImpl Impl(MF, CInfo, TPC, *VT, CSEInfo,
RuleConfig, ST, MDT, LI);
bool Changed = Impl.combineMachineInstrs();
@@ -769,7 +769,7 @@ INITIALIZE_PASS_BEGIN(AArch64PostLegalizerCombiner, DEBUG_TYPE,
"Combine AArch64 MachineInstrs after legalization", false,
false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
-INITIALIZE_PASS_DEPENDENCY(GISelKnownBitsAnalysis)
+INITIALIZE_PASS_DEPENDENCY(GISelValueTrackingAnalysis)
INITIALIZE_PASS_END(AArch64PostLegalizerCombiner, DEBUG_TYPE,
"Combine AArch64 MachineInstrs after legalization", false,
false)
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
index 6bba70d45a61d..bd50bc6652391 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
@@ -1286,7 +1286,7 @@ AArch64PostLegalizerLoweringImpl::AArch64PostLegalizerLoweringImpl(
GISelCSEInfo *CSEInfo,
const AArch64PostLegalizerLoweringImplRuleConfig &RuleConfig,
const AArch64Subtarget &STI)
- : Combiner(MF, CInfo, TPC, /*KB*/ nullptr, CSEInfo),
+ : Combiner(MF, CInfo, TPC, /*VT*/ nullptr, CSEInfo),
Helper(Observer, B, /*IsPreLegalize*/ true), RuleConfig(RuleConfig),
STI(STI),
#define GET_GICOMBINER_CONSTRUCTOR_INITS
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
index 76520fe5ce20f..6496d56d74b2c 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
@@ -18,7 +18,7 @@
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
@@ -73,8 +73,8 @@ void applyFConstantToConstant(MachineInstr &MI) {
/// are sign bits. In this case, we can transform the G_ICMP to directly compare
/// the wide value with a zero.
bool matchICmpRedundantTrunc(MachineInstr &MI, MachineRegisterInfo &MRI,
- GISelKnownBits *KB, Register &MatchInfo) {
- assert(MI.getOpcode() == TargetOpcode::G_ICMP && KB);
+ GISelValueTracking *VT, Register &MatchInfo) {
+ assert(MI.getOpcode() == TargetOpcode::G_ICMP && VT);
auto Pred = (CmpInst::Predicate)MI.getOperand(1).getPredicate();
if (!ICmpInst::isEquality(Pred))
@@ -93,7 +93,7 @@ bool matchICmpRedundantTrunc(MachineInstr &MI, MachineRegisterInfo &MRI,
return false;
LLT WideTy = MRI.getType(WideReg);
- if (KB->computeNumSignBits(WideReg) <=
+ if (VT->computeNumSignBits(WideReg) <=
WideTy.getSizeInBits() - LHSTy.getSizeInBits())
return false;
@@ -730,7 +730,7 @@ class AArch64PreLegalizerCombinerImpl : public Combiner {
public:
AArch64PreLegalizerCombinerImpl(
MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
- GISelKnownBits &KB, GISelCSEInfo *CSEInfo,
+ GISelValueTracking &VT, GISelCSEInfo *CSEInfo,
const AArch64PreLegalizerCombinerImplRuleConfig &RuleConfig,
const AArch64Subtarget &STI, MachineDominatorTree *MDT,
const LegalizerInfo *LI);
@@ -753,12 +753,12 @@ class AArch64PreLegalizerCombinerImpl : public Combiner {
AArch64PreLegalizerCombinerImpl::AArch64PreLegalizerCombinerImpl(
MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
- GISelKnownBits &KB, GISelCSEInfo *CSEInfo,
+ GISelValueTracking &VT, GISelCSEInfo *CSEInfo,
const AArch64PreLegalizerCombinerImplRuleConfig &RuleConfig,
const AArch64Subtarget &STI, MachineDominatorTree *MDT,
const LegalizerInfo *LI)
- : Combiner(MF, CInfo, TPC, &KB, CSEInfo),
- Helper(Observer, B, /*IsPreLegalize*/ true, &KB, MDT, LI),
+ : Combiner(MF, CInfo, TPC, &VT, CSEInfo),
+ Helper(Observer, B, /*IsPreLegalize*/ true, &VT, MDT, LI),
RuleConfig(RuleConfig), STI(STI),
#define GET_GICOMBINER_CONSTRUCTOR_INITS
#include "AArch64GenPreLegalizeGICombiner.inc"
@@ -822,8 +822,8 @@ void AArch64PreLegalizerCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetPassConfig>();
AU.setPreservesCFG();
getSelectionDAGFallbackAnalysisUsage(AU);
- AU.addRequired<GISelKnownBitsAnalysis>();
- AU.addPreserved<GISelKnownBitsAnalysis>();
+ AU.addRequired<GISelValueTrackingAnalysis>();
+ AU.addPreserved<GISelValueTrackingAnalysis>();
AU.addRequired<MachineDominatorTreeWrapperPass>();
AU.addPreserved<MachineDominatorTreeWrapperPass>();
AU.addRequired<GISelCSEAnalysisWrapperPass>();
@@ -856,7 +856,7 @@ bool AArch64PreLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
const Function &F = MF.getFunction();
bool EnableOpt =
MF.getTarget().getOptLevel() != CodeGenOptLevel::None && !skipFunction(F);
- GISelKnownBits *KB = &getAnalysis<GISelKnownBitsAnalysis>().get(MF);
+ GISelValueTracking *VT = &getAnalysis<GISelValueTrackingAnalysis>().get(MF);
MachineDominatorTree *MDT =
&getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
CombinerInfo CInfo(/*AllowIllegalOps*/ true, /*ShouldLegalizeIllegal*/ false,
@@ -868,7 +868,7 @@ bool AArch64PreLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
// This is the first Combiner, so the input IR might contain dead
// instructions.
CInfo.EnableFullDCE = true;
- AArch64PreLegalizerCombinerImpl Impl(MF, CInfo, &TPC, *KB, CSEInfo,
+ AArch64PreLegalizerCombinerImpl Impl(MF, CInfo, &TPC, *VT, CSEInfo,
RuleConfig, ST, MDT, LI);
return Impl.combineMachineInstrs();
}
@@ -878,7 +878,7 @@ INITIALIZE_PASS_BEGIN(AArch64PreLegalizerCombiner, DEBUG_TYPE,
"Combine AArch64 machine instrs before legalization",
false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
-INITIALIZE_PASS_DEPENDENCY(GISelKnownBitsAnalysis)
+INITIALIZE_PASS_DEPENDENCY(GISelValueTrackingAnalysis)
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
INITIALIZE_PASS_END(AArch64PreLegalizerCombiner, DEBUG_TYPE,
"Combine AArch64 machine instrs before legalization", false,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp
index 46194ab46ff6a..bcc19932fd938 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp
@@ -19,9 +19,9 @@ using namespace MIPatternMatch;
AMDGPUCombinerHelper::AMDGPUCombinerHelper(
GISelChangeObserver &Observer, MachineIRBuilder &B, bool IsPreLegalize,
- GISelKnownBits *KB, MachineDominatorTree *MDT, const LegalizerInfo *LI,
+ GISelValueTracking *VT, MachineDominatorTree *MDT, const LegalizerInfo *LI,
const GCNSubtarget &STI)
- : CombinerHelper(Observer, B, IsPreLegalize, KB, MDT, LI), STI(STI),
+ : CombinerHelper(Observer, B, IsPreLegalize, VT, MDT, LI), STI(STI),
TII(*STI.getInstrInfo()) {}
LLVM_READNONE
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.h b/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.h
index bc3d9daef87c5..96e0b12a168a6 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.h
@@ -28,7 +28,7 @@ class AMDGPUCombinerHelper : public CombinerHelper {
public:
using CombinerHelper::CombinerHelper;
AMDGPUCombinerHelper(GISelChangeObserver &Observer, MachineIRBuilder &B,
- bool IsPreLegalize, GISelKnownBits *KB,
+ bool IsPreLegalize, GISelValueTracking *VT,
MachineDominatorTree *MDT, const LegalizerInfo *LI,
const GCNSubtarget &STI);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.cpp b/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.cpp
index 47e32c4864809..00979f44f9d34 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.cpp
@@ -10,7 +10,7 @@
#include "AMDGPURegisterBankInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/DenseSet.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
@@ -24,7 +24,8 @@ using namespace MIPatternMatch;
std::pair<Register, unsigned>
AMDGPU::getBaseWithConstantOffset(MachineRegisterInfo &MRI, Register Reg,
- GISelKnownBits *KnownBits, bool CheckNUW) {
+ GISelValueTracking *ValueTracking,
+ bool CheckNUW) {
MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
if (Def->getOpcode() == TargetOpcode::G_CONSTANT) {
unsigned Offset;
@@ -55,8 +56,9 @@ AMDGPU::getBaseWithConstantOffset(MachineRegisterInfo &MRI, Register Reg,
}
Register Base;
- if (KnownBits && mi_match(Reg, MRI, m_GOr(m_Reg(Base), m_ICst(Offset))) &&
- KnownBits->maskedValueIsZero(Base, APInt(32, Offset, /*isSigned=*/true)))
+ if (ValueTracking && mi_match(Reg, MRI, m_GOr(m_Reg(Base), m_ICst(Offset))) &&
+ ValueTracking->maskedValueIsZero(Base,
+ APInt(32, Offset, /*isSigned=*/true)))
return std::pair(Base, Offset);
// Handle G_PTRTOINT (G_PTR_ADD base, const) case
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.h b/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.h
index 70cfdacec700c..0c89bb5cc6100 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.h
@@ -17,7 +17,7 @@ namespace llvm {
class MachineRegisterInfo;
class GCNSubtarget;
-class GISelKnownBits;
+class GISelValueTracking;
class LLT;
class MachineFunction;
class MachineIRBuilder;
@@ -28,7 +28,7 @@ namespace AMDGPU {
/// Returns base register and constant offset.
std::pair<Register, unsigned>
getBaseWithConstantOffset(MachineRegisterInfo &MRI, Register Reg,
- GISelKnownBits *KnownBits = nullptr,
+ GISelValueTracking *ValueTracking = nullptr,
bool CheckNUW = false);
// Currently finds S32/S64 lane masks that can be declared as divergent by
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index d36355860a33b..3246e575ea6a9 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -19,7 +19,7 @@
#include "AMDGPUMemoryUtils.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/Analysis.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
@@ -5936,9 +5936,8 @@ unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
}
unsigned AMDGPUTargetLowering::computeNumSignBitsForTargetInstr(
- GISelKnownBits &Analysis, Register R,
- const APInt &DemandedElts, const MachineRegisterInfo &MRI,
- unsigned Depth) const {
+ GISelValueTracking &Analysis, Register R, const APInt &DemandedElts,
+ const MachineRegisterInfo &MRI, unsigned Depth) const {
const MachineInstr *MI = MRI.getVRegDef(R);
if (!MI)
return 1;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
index e647136b395ae..6705f86e15fc2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -315,7 +315,7 @@ class AMDGPUTargetLowering : public TargetLowering {
const SelectionDAG &DAG,
unsigned Depth = 0) const override;
- unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis,
+ unsigned computeNumSignBitsForTargetInstr(GISelValueTracking &Analysis,
Register R,
const APInt &DemandedElts,
const MachineRegisterInfo &MRI,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index d1b1cb788b7d2..984b09ff163b3 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -20,7 +20,7 @@
#include "SIMachineFunctionInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
@@ -56,14 +56,15 @@ AMDGPUInstructionSelector::AMDGPUInstructionSelector(
const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
-void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
+void AMDGPUInstructionSelector::setupMF(MachineFunction &MF,
+ GISelValueTracking *VT,
CodeGenCoverage *CoverageInfo,
ProfileSummaryInfo *PSI,
BlockFrequencyInfo *BFI) {
MRI = &MF.getRegInfo();
Subtarget = &MF.getSubtarget<GCNSubtarget>();
Subtarget->checkSubtargetFeatures(MF.getFunction());
- InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
+ InstructionSelector::setupMF(MF, VT, CoverageInfo, PSI, BFI);
}
// Return the wave level SGPR base address if this is a wave address.
@@ -1888,7 +1889,7 @@ bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
.addImm(0);
} else {
std::tie(BaseOffset, ImmOffset) =
- AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset, KB);
+ AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset, VT);
if (Readfirstlane) {
// We have the constant offset now, so put the readfirstlane back on the
@@ -3096,7 +3097,7 @@ bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
// Try to avoid emitting a bit operation when we only need to touch half of
// the 64-bit pointer.
- APInt MaskOnes = KB->getKnownOnes(MaskReg).zext(64);
+ APInt MaskOnes = VT->getKnownOnes(MaskReg).zext(64);
const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
@@ -3195,12 +3196,12 @@ bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
static std::pair<Register, unsigned>
computeIndirectRegIndex(MachineRegisterInfo &MRI, const SIRegisterInfo &TRI,
const TargetRegisterClass *SuperRC, Register IdxReg,
- unsigned EltSize, GISelKnownBits &KnownBits) {
+ unsigned EltSize, GISelValueTracking &ValueTracking) {
Register IdxBaseReg;
int Offset;
std::tie(IdxBaseReg, Offset) =
- AMDGPU::getBaseWithConstantOffset(MRI, IdxReg, &KnownBits);
+ AMDGPU::getBaseWithConstantOffset(MRI, IdxReg, &ValueTracking);
if (IdxBaseReg == AMDGPU::NoRegister) {
// This will happen if the index is a known constant. This should ordinarily
// be legalized out, but handle it as a register just in case.
@@ -3252,7 +3253,7 @@ bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
unsigned SubReg;
std::tie(IdxReg, SubReg) = computeIndirectRegIndex(
- *MRI, TRI, SrcRC, IdxReg, DstTy.getSizeInBits() / 8, *KB);
+ *MRI, TRI, SrcRC, IdxReg, DstTy.getSizeInBits() / 8, *VT);
if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
if (DstTy.getSizeInBits() != 32 && !Is64)
@@ -3333,7 +3334,7 @@ bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
unsigned SubReg;
std::tie(IdxReg, SubReg) =
- computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg, ValSize / 8, *KB);
+ computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg, ValSize / 8, *VT);
const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
STI.useVGPRIndexMode();
@@ -4702,7 +4703,7 @@ bool AMDGPUInstructionSelector::selectSmrdOffset(MachineOperand &Root,
// to be negative if the resulting (Offset + (M0 or SOffset or zero)
// is negative. Handle the case where the Immediate Offset + SOffset
// is negative.
- auto SKnown = KB->getKnownBits(*SOffset);
+ auto SKnown = VT->getKnownBits(*SOffset);
if (*Offset + SKnown.getMinValue().getSExtValue() < 0)
return false;
@@ -5036,8 +5037,8 @@ bool AMDGPUInstructionSelector::checkFlatScratchSVSSwizzleBug(
// The bug affects the swizzling of SVS accesses if there is any carry out
// from the two low order bits (i.e. from bit 1 into bit 2) when adding
// voffset to (soffset + inst_offset).
- auto VKnown = KB->getKnownBits(VAddr);
- auto SKnown = KnownBits::add(KB->getKnownBits(SAddr),
+ auto VKnown = VT->getKnownBits(VAddr);
+ auto SKnown = KnownBits::add(VT->getKnownBits(SAddr),
KnownBits::makeConstant(APInt(32, ImmOffset)));
uint64_t VMax = VKnown.getMaxValue().getZExtValue();
uint64_t SMax = SKnown.getMaxValue().getZExtValue();
@@ -5152,7 +5153,7 @@ AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
if (ConstOffset != 0) {
if (TII.isLegalMUBUFImmOffset(ConstOffset) &&
(!STI.privateMemoryResourceIsRangeChecked() ||
- KB->signBitIsZero(PtrBase))) {
+ VT->signBitIsZero(PtrBase))) {
const MachineInstr *PtrBaseDef = MRI->getVRegDef(PtrBase);
if (PtrBaseDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
FI = PtrBaseDef->getOperand(1).getIndex();
@@ -5193,7 +5194,7 @@ bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
// On Southern Islands instruction with a negative base value and an offset
// don't seem to work.
- return KB->signBitIsZero(Base);
+ return VT->signBitIsZero(Base);
}
bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
@@ -5209,7 +5210,7 @@ bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
// On Southern Islands instruction with a negative base value and an offset
// don't seem to work.
- return KB->signBitIsZero(Base);
+ return VT->signBitIsZero(Base);
}
// Return whether the operation has NoUnsignedWrap property.
@@ -5248,7 +5249,7 @@ bool AMDGPUInstructionSelector::isFlatScratchBaseLegal(Register Addr) const {
return true;
}
- return KB->signBitIsZero(LHS);
+ return VT->signBitIsZero(LHS);
}
// Check address value in SGPR/VGPR are legal for flat scratch in the form
@@ -5266,7 +5267,7 @@ bool AMDGPUInstructionSelector::isFlatScratchBaseLegalSV(Register Addr) const {
Register LHS = AddrMI->getOperand(1).getReg();
Register RHS = AddrMI->getOperand(2).getReg();
- return KB->signBitIsZero(RHS) && KB->signBitIsZero(LHS);
+ return VT->signBitIsZero(RHS) && VT->signBitIsZero(LHS);
}
// Check address value in SGPR/VGPR are legal for flat scratch in the form
@@ -5298,7 +5299,7 @@ bool AMDGPUInstructionSelector::isFlatScratchBaseLegalSVImm(
Register LHS = BaseDef->MI->getOperand(1).getReg();
Register RHS = BaseDef->MI->getOperand(2).getReg();
- return KB->signBitIsZero(RHS) && KB->signBitIsZero(LHS);
+ return VT->signBitIsZero(RHS) && VT->signBitIsZero(LHS);
}
bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI,
@@ -5313,7 +5314,7 @@ bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI,
if (RHS->countr_one() >= ShAmtBits)
return true;
- const APInt &LHSKnownZeros = KB->getKnownZeroes(MI.getOperand(1).getReg());
+ const APInt &LHSKnownZeros = VT->getKnownZeroes(MI.getOperand(1).getReg());
return (LHSKnownZeros | *RHS).countr_one() >= ShAmtBits;
}
@@ -5813,7 +5814,7 @@ AMDGPUInstructionSelector::selectSMRDBufferSgprImm(MachineOperand &Root) const {
Register SOffset;
unsigned Offset;
std::tie(SOffset, Offset) = AMDGPU::getBaseWithConstantOffset(
- *MRI, Root.getReg(), KB, /*CheckNUW*/ true);
+ *MRI, Root.getReg(), VT, /*CheckNUW*/ true);
if (!SOffset)
return std::nullopt;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
index cc7552868a056..6c3f3026e877a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
@@ -58,7 +58,7 @@ class AMDGPUInstructionSelector final : public InstructionSelector {
bool select(MachineInstr &I) override;
static const char *getName();
- void setupMF(MachineFunction &MF, GISelKnownBits *KB,
+ void setupMF(MachineFunction &MF, GISelValueTracking *VT,
CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
BlockFrequencyInfo *BFI) override;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 668c70780ee90..275d0193452a5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -3919,7 +3919,7 @@ void AMDGPULegalizerInfo::buildMultiply(LegalizerHelper &Helper,
using Carry = SmallVector<Register, 2>;
MachineIRBuilder &B = Helper.MIRBuilder;
- GISelKnownBits &KB = *Helper.getKnownBits();
+ GISelValueTracking &VT = *Helper.getValueTracking();
const LLT S1 = LLT::scalar(1);
const LLT S32 = LLT::scalar(32);
@@ -3941,8 +3941,8 @@ void AMDGPULegalizerInfo::buildMultiply(LegalizerHelper &Helper,
SmallVector<bool, 2> Src0KnownZeros, Src1KnownZeros;
for (unsigned i = 0; i < Src0.size(); ++i) {
- Src0KnownZeros.push_back(KB.getKnownBits(Src0[i]).isZero());
- Src1KnownZeros.push_back(KB.getKnownBits(Src1[i]).isZero());
+ Src0KnownZeros.push_back(VT.getKnownBits(Src0[i]).isZero());
+ Src1KnownZeros.push_back(VT.getKnownBits(Src1[i]).isZero());
}
// Merge the given carries into the 32-bit LocalAccum, which is modified
@@ -4014,7 +4014,7 @@ void AMDGPULegalizerInfo::buildMultiply(LegalizerHelper &Helper,
continue;
}
auto Mul = B.buildMul(S32, Src0[j0], Src1[j1]);
- if (!LocalAccum[0] || KB.getKnownBits(LocalAccum[0]).isZero()) {
+ if (!LocalAccum[0] || VT.getKnownBits(LocalAccum[0]).isZero()) {
LocalAccum[0] = Mul.getReg(0);
} else {
if (CarryIn.empty()) {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp
index 888817e52e35d..b2a8143b82ab6 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp
@@ -20,7 +20,7 @@
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/MachineDominators.h"
@@ -53,7 +53,7 @@ class AMDGPUPostLegalizerCombinerImpl : public Combiner {
public:
AMDGPUPostLegalizerCombinerImpl(
MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
- GISelKnownBits &KB, GISelCSEInfo *CSEInfo,
+ GISelValueTracking &VT, GISelCSEInfo *CSEInfo,
const AMDGPUPostLegalizerCombinerImplRuleConfig &RuleConfig,
const GCNSubtarget &STI, MachineDominatorTree *MDT,
const LegalizerInfo *LI);
@@ -129,12 +129,12 @@ class AMDGPUPostLegalizerCombinerImpl : public Combiner {
AMDGPUPostLegalizerCombinerImpl::AMDGPUPostLegalizerCombinerImpl(
MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
- GISelKnownBits &KB, GISelCSEInfo *CSEInfo,
+ GISelValueTracking &VT, GISelCSEInfo *CSEInfo,
const AMDGPUPostLegalizerCombinerImplRuleConfig &RuleConfig,
const GCNSubtarget &STI, MachineDominatorTree *MDT, const LegalizerInfo *LI)
- : Combiner(MF, CInfo, TPC, &KB, CSEInfo), RuleConfig(RuleConfig), STI(STI),
+ : Combiner(MF, CInfo, TPC, &VT, CSEInfo), RuleConfig(RuleConfig), STI(STI),
TII(*STI.getInstrInfo()),
- Helper(Observer, B, /*IsPreLegalize*/ false, &KB, MDT, LI, STI),
+ Helper(Observer, B, /*IsPreLegalize*/ false, &VT, MDT, LI, STI),
#define GET_GICOMBINER_CONSTRUCTOR_INITS
#include "AMDGPUGenPostLegalizeGICombiner.inc"
#undef GET_GICOMBINER_CONSTRUCTOR_INITS
@@ -218,7 +218,7 @@ bool AMDGPUPostLegalizerCombinerImpl::matchUCharToFloat(
unsigned SrcSize = MRI.getType(SrcReg).getSizeInBits();
assert(SrcSize == 16 || SrcSize == 32 || SrcSize == 64);
const APInt Mask = APInt::getHighBitsSet(SrcSize, SrcSize - 8);
- return Helper.getKnownBits()->maskedValueIsZero(SrcReg, Mask);
+ return Helper.getValueTracking()->maskedValueIsZero(SrcReg, Mask);
}
return false;
@@ -421,14 +421,14 @@ bool AMDGPUPostLegalizerCombinerImpl::matchCombine_s_mul_u64(
if (MRI.getType(Src0) != LLT::scalar(64))
return false;
- if (KB->getKnownBits(Src1).countMinLeadingZeros() >= 32 &&
- KB->getKnownBits(Src0).countMinLeadingZeros() >= 32) {
+ if (VT->getKnownBits(Src1).countMinLeadingZeros() >= 32 &&
+ VT->getKnownBits(Src0).countMinLeadingZeros() >= 32) {
NewOpcode = AMDGPU::G_AMDGPU_S_MUL_U64_U32;
return true;
}
- if (KB->computeNumSignBits(Src1) >= 33 &&
- KB->computeNumSignBits(Src0) >= 33) {
+ if (VT->computeNumSignBits(Src1) >= 33 &&
+ VT->computeNumSignBits(Src0) >= 33) {
NewOpcode = AMDGPU::G_AMDGPU_S_MUL_I64_I32;
return true;
}
@@ -462,8 +462,8 @@ void AMDGPUPostLegalizerCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetPassConfig>();
AU.setPreservesCFG();
getSelectionDAGFallbackAnalysisUsage(AU);
- AU.addRequired<GISelKnownBitsAnalysis>();
- AU.addPreserved<GISelKnownBitsAnalysis>();
+ AU.addRequired<GISelValueTrackingAnalysis>();
+ AU.addPreserved<GISelValueTrackingAnalysis>();
if (!IsOptNone) {
AU.addRequired<MachineDominatorTreeWrapperPass>();
AU.addPreserved<MachineDominatorTreeWrapperPass>();
@@ -492,7 +492,7 @@ bool AMDGPUPostLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
const AMDGPULegalizerInfo *LI =
static_cast<const AMDGPULegalizerInfo *>(ST.getLegalizerInfo());
- GISelKnownBits *KB = &getAnalysis<GISelKnownBitsAnalysis>().get(MF);
+ GISelValueTracking *VT = &getAnalysis<GISelValueTrackingAnalysis>().get(MF);
MachineDominatorTree *MDT =
IsOptNone ? nullptr
: &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
@@ -504,7 +504,7 @@ bool AMDGPUPostLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
CInfo.ObserverLvl = CombinerInfo::ObserverLevel::SinglePass;
// Legalizer performs DCE, so a full DCE pass is unnecessary.
CInfo.EnableFullDCE = false;
- AMDGPUPostLegalizerCombinerImpl Impl(MF, CInfo, TPC, *KB, /*CSEInfo*/ nullptr,
+ AMDGPUPostLegalizerCombinerImpl Impl(MF, CInfo, TPC, *VT, /*CSEInfo*/ nullptr,
RuleConfig, ST, MDT, LI);
return Impl.combineMachineInstrs();
}
@@ -514,7 +514,7 @@ INITIALIZE_PASS_BEGIN(AMDGPUPostLegalizerCombiner, DEBUG_TYPE,
"Combine AMDGPU machine instrs after legalization", false,
false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
-INITIALIZE_PASS_DEPENDENCY(GISelKnownBitsAnalysis)
+INITIALIZE_PASS_DEPENDENCY(GISelValueTrackingAnalysis)
INITIALIZE_PASS_END(AMDGPUPostLegalizerCombiner, DEBUG_TYPE,
"Combine AMDGPU machine instrs after legalization", false,
false)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPreLegalizerCombiner.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPreLegalizerCombiner.cpp
index 52c6e5274ae5b..4ce3c0107d566 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPreLegalizerCombiner.cpp
@@ -21,7 +21,7 @@
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/TargetPassConfig.h"
@@ -50,7 +50,7 @@ class AMDGPUPreLegalizerCombinerImpl : public Combiner {
public:
AMDGPUPreLegalizerCombinerImpl(
MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
- GISelKnownBits &KB, GISelCSEInfo *CSEInfo,
+ GISelValueTracking &VT, GISelCSEInfo *CSEInfo,
const AMDGPUPreLegalizerCombinerImplRuleConfig &RuleConfig,
const GCNSubtarget &STI, MachineDominatorTree *MDT,
const LegalizerInfo *LI);
@@ -89,11 +89,11 @@ class AMDGPUPreLegalizerCombinerImpl : public Combiner {
AMDGPUPreLegalizerCombinerImpl::AMDGPUPreLegalizerCombinerImpl(
MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
- GISelKnownBits &KB, GISelCSEInfo *CSEInfo,
+ GISelValueTracking &VT, GISelCSEInfo *CSEInfo,
const AMDGPUPreLegalizerCombinerImplRuleConfig &RuleConfig,
const GCNSubtarget &STI, MachineDominatorTree *MDT, const LegalizerInfo *LI)
- : Combiner(MF, CInfo, TPC, &KB, CSEInfo), RuleConfig(RuleConfig), STI(STI),
- Helper(Observer, B, /*IsPreLegalize*/ true, &KB, MDT, LI, STI),
+ : Combiner(MF, CInfo, TPC, &VT, CSEInfo), RuleConfig(RuleConfig), STI(STI),
+ Helper(Observer, B, /*IsPreLegalize*/ true, &VT, MDT, LI, STI),
#define GET_GICOMBINER_CONSTRUCTOR_INITS
#include "AMDGPUGenPreLegalizeGICombiner.inc"
#undef GET_GICOMBINER_CONSTRUCTOR_INITS
@@ -234,8 +234,8 @@ void AMDGPUPreLegalizerCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetPassConfig>();
AU.setPreservesCFG();
getSelectionDAGFallbackAnalysisUsage(AU);
- AU.addRequired<GISelKnownBitsAnalysis>();
- AU.addPreserved<GISelKnownBitsAnalysis>();
+ AU.addRequired<GISelValueTrackingAnalysis>();
+ AU.addPreserved<GISelValueTrackingAnalysis>();
if (!IsOptNone) {
AU.addRequired<MachineDominatorTreeWrapperPass>();
AU.addPreserved<MachineDominatorTreeWrapperPass>();
@@ -262,7 +262,7 @@ bool AMDGPUPreLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
const Function &F = MF.getFunction();
bool EnableOpt =
MF.getTarget().getOptLevel() != CodeGenOptLevel::None && !skipFunction(F);
- GISelKnownBits *KB = &getAnalysis<GISelKnownBitsAnalysis>().get(MF);
+ GISelValueTracking *VT = &getAnalysis<GISelValueTrackingAnalysis>().get(MF);
// Enable CSE.
GISelCSEAnalysisWrapper &Wrapper =
@@ -281,7 +281,7 @@ bool AMDGPUPreLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
// This is the first Combiner, so the input IR might contain dead
// instructions.
CInfo.EnableFullDCE = true;
- AMDGPUPreLegalizerCombinerImpl Impl(MF, CInfo, TPC, *KB, CSEInfo, RuleConfig,
+ AMDGPUPreLegalizerCombinerImpl Impl(MF, CInfo, TPC, *VT, CSEInfo, RuleConfig,
STI, MDT, STI.getLegalizerInfo());
return Impl.combineMachineInstrs();
}
@@ -291,7 +291,7 @@ INITIALIZE_PASS_BEGIN(AMDGPUPreLegalizerCombiner, DEBUG_TYPE,
"Combine AMDGPU machine instrs before legalization",
false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
-INITIALIZE_PASS_DEPENDENCY(GISelKnownBitsAnalysis)
+INITIALIZE_PASS_DEPENDENCY(GISelValueTrackingAnalysis)
INITIALIZE_PASS_END(AMDGPUPreLegalizerCombiner, DEBUG_TYPE,
"Combine AMDGPU machine instrs before legalization", false,
false)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp
index 98c48f4fe3705..b416d9756297c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp
@@ -21,7 +21,7 @@
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/TargetPassConfig.h"
@@ -53,7 +53,7 @@ class AMDGPURegBankCombinerImpl : public Combiner {
public:
AMDGPURegBankCombinerImpl(
MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
- GISelKnownBits &KB, GISelCSEInfo *CSEInfo,
+ GISelValueTracking &VT, GISelCSEInfo *CSEInfo,
const AMDGPURegBankCombinerImplRuleConfig &RuleConfig,
const GCNSubtarget &STI, MachineDominatorTree *MDT,
const LegalizerInfo *LI);
@@ -110,13 +110,13 @@ class AMDGPURegBankCombinerImpl : public Combiner {
AMDGPURegBankCombinerImpl::AMDGPURegBankCombinerImpl(
MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
- GISelKnownBits &KB, GISelCSEInfo *CSEInfo,
+ GISelValueTracking &VT, GISelCSEInfo *CSEInfo,
const AMDGPURegBankCombinerImplRuleConfig &RuleConfig,
const GCNSubtarget &STI, MachineDominatorTree *MDT, const LegalizerInfo *LI)
- : Combiner(MF, CInfo, TPC, &KB, CSEInfo), RuleConfig(RuleConfig), STI(STI),
+ : Combiner(MF, CInfo, TPC, &VT, CSEInfo), RuleConfig(RuleConfig), STI(STI),
RBI(*STI.getRegBankInfo()), TRI(*STI.getRegisterInfo()),
TII(*STI.getInstrInfo()),
- Helper(Observer, B, /*IsPreLegalize*/ false, &KB, MDT, LI),
+ Helper(Observer, B, /*IsPreLegalize*/ false, &VT, MDT, LI),
#define GET_GICOMBINER_CONSTRUCTOR_INITS
#include "AMDGPUGenRegBankGICombiner.inc"
#undef GET_GICOMBINER_CONSTRUCTOR_INITS
@@ -416,8 +416,8 @@ void AMDGPURegBankCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetPassConfig>();
AU.setPreservesCFG();
getSelectionDAGFallbackAnalysisUsage(AU);
- AU.addRequired<GISelKnownBitsAnalysis>();
- AU.addPreserved<GISelKnownBitsAnalysis>();
+ AU.addRequired<GISelValueTrackingAnalysis>();
+ AU.addPreserved<GISelValueTrackingAnalysis>();
if (!IsOptNone) {
AU.addRequired<MachineDominatorTreeWrapperPass>();
AU.addPreserved<MachineDominatorTreeWrapperPass>();
@@ -443,7 +443,7 @@ bool AMDGPURegBankCombiner::runOnMachineFunction(MachineFunction &MF) {
MF.getTarget().getOptLevel() != CodeGenOptLevel::None && !skipFunction(F);
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
- GISelKnownBits *KB = &getAnalysis<GISelKnownBitsAnalysis>().get(MF);
+ GISelValueTracking *VT = &getAnalysis<GISelValueTrackingAnalysis>().get(MF);
const auto *LI = ST.getLegalizerInfo();
MachineDominatorTree *MDT =
@@ -458,7 +458,7 @@ bool AMDGPURegBankCombiner::runOnMachineFunction(MachineFunction &MF) {
// RegBankSelect seems not to leave dead instructions, so a full DCE pass is
// unnecessary.
CInfo.EnableFullDCE = false;
- AMDGPURegBankCombinerImpl Impl(MF, CInfo, TPC, *KB, /*CSEInfo*/ nullptr,
+ AMDGPURegBankCombinerImpl Impl(MF, CInfo, TPC, *VT, /*CSEInfo*/ nullptr,
RuleConfig, ST, MDT, LI);
return Impl.combineMachineInstrs();
}
@@ -468,7 +468,7 @@ INITIALIZE_PASS_BEGIN(AMDGPURegBankCombiner, DEBUG_TYPE,
"Combine AMDGPU machine instrs after regbankselect",
false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
-INITIALIZE_PASS_DEPENDENCY(GISelKnownBitsAnalysis)
+INITIALIZE_PASS_DEPENDENCY(GISelValueTrackingAnalysis)
INITIALIZE_PASS_END(AMDGPURegBankCombiner, DEBUG_TYPE,
"Combine AMDGPU machine instrs after regbankselect", false,
false)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index b0c18715ef810..c8645850fe111 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -27,7 +27,7 @@
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ByteProvider.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
@@ -16400,16 +16400,18 @@ void SITargetLowering::computeKnownBitsForFrameIndex(
Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex());
}
-static void knownBitsForWorkitemID(const GCNSubtarget &ST, GISelKnownBits &KB,
- KnownBits &Known, unsigned Dim) {
+static void knownBitsForWorkitemID(const GCNSubtarget &ST,
+ GISelValueTracking &VT, KnownBits &Known,
+ unsigned Dim) {
unsigned MaxValue =
- ST.getMaxWorkitemID(KB.getMachineFunction().getFunction(), Dim);
+ ST.getMaxWorkitemID(VT.getMachineFunction().getFunction(), Dim);
Known.Zero.setHighBits(llvm::countl_zero(MaxValue));
}
void SITargetLowering::computeKnownBitsForTargetInstr(
- GISelKnownBits &KB, Register R, KnownBits &Known, const APInt &DemandedElts,
- const MachineRegisterInfo &MRI, unsigned Depth) const {
+ GISelValueTracking &VT, Register R, KnownBits &Known,
+ const APInt &DemandedElts, const MachineRegisterInfo &MRI,
+ unsigned Depth) const {
const MachineInstr *MI = MRI.getVRegDef(R);
switch (MI->getOpcode()) {
case AMDGPU::G_INTRINSIC:
@@ -16417,13 +16419,13 @@ void SITargetLowering::computeKnownBitsForTargetInstr(
Intrinsic::ID IID = cast<GIntrinsic>(MI)->getIntrinsicID();
switch (IID) {
case Intrinsic::amdgcn_workitem_id_x:
- knownBitsForWorkitemID(*getSubtarget(), KB, Known, 0);
+ knownBitsForWorkitemID(*getSubtarget(), VT, Known, 0);
break;
case Intrinsic::amdgcn_workitem_id_y:
- knownBitsForWorkitemID(*getSubtarget(), KB, Known, 1);
+ knownBitsForWorkitemID(*getSubtarget(), VT, Known, 1);
break;
case Intrinsic::amdgcn_workitem_id_z:
- knownBitsForWorkitemID(*getSubtarget(), KB, Known, 2);
+ knownBitsForWorkitemID(*getSubtarget(), VT, Known, 2);
break;
case Intrinsic::amdgcn_mbcnt_lo:
case Intrinsic::amdgcn_mbcnt_hi: {
@@ -16433,7 +16435,7 @@ void SITargetLowering::computeKnownBitsForTargetInstr(
? getSubtarget()->getWavefrontSizeLog2()
: 5);
KnownBits Known2;
- KB.computeKnownBitsImpl(MI->getOperand(3).getReg(), Known2, DemandedElts,
+ VT.computeKnownBitsImpl(MI->getOperand(3).getReg(), Known2, DemandedElts,
Depth + 1);
Known = KnownBits::add(Known, Known2);
break;
@@ -16460,17 +16462,17 @@ void SITargetLowering::computeKnownBitsForTargetInstr(
auto [Dst, Src0, Src1, Src2] = MI->getFirst4Regs();
KnownBits Known2;
- KB.computeKnownBitsImpl(Src2, Known2, DemandedElts, Depth + 1);
+ VT.computeKnownBitsImpl(Src2, Known2, DemandedElts, Depth + 1);
if (Known2.isUnknown())
break;
KnownBits Known1;
- KB.computeKnownBitsImpl(Src1, Known1, DemandedElts, Depth + 1);
+ VT.computeKnownBitsImpl(Src1, Known1, DemandedElts, Depth + 1);
if (Known1.isUnknown())
break;
KnownBits Known0;
- KB.computeKnownBitsImpl(Src0, Known0, DemandedElts, Depth + 1);
+ VT.computeKnownBitsImpl(Src0, Known0, DemandedElts, Depth + 1);
if (Known0.isUnknown())
break;
@@ -16483,14 +16485,14 @@ void SITargetLowering::computeKnownBitsForTargetInstr(
}
Align SITargetLowering::computeKnownAlignForTargetInstr(
- GISelKnownBits &KB, Register R, const MachineRegisterInfo &MRI,
+ GISelValueTracking &VT, Register R, const MachineRegisterInfo &MRI,
unsigned Depth) const {
const MachineInstr *MI = MRI.getVRegDef(R);
if (auto *GI = dyn_cast<GIntrinsic>(MI)) {
// FIXME: Can this move to generic code? What about the case where the call
// site specifies a lower alignment?
Intrinsic::ID IID = GI->getIntrinsicID();
- LLVMContext &Ctx = KB.getMachineFunction().getFunction().getContext();
+ LLVMContext &Ctx = VT.getMachineFunction().getFunction().getContext();
AttributeList Attrs = Intrinsic::getAttributes(Ctx, IID);
if (MaybeAlign RetAlign = Attrs.getRetAlignment())
return *RetAlign;
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index 8e4717a3f64ab..dc0634331caf9 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -511,13 +511,14 @@ class SITargetLowering final : public AMDGPUTargetLowering {
void computeKnownBitsForFrameIndex(int FrameIdx,
KnownBits &Known,
const MachineFunction &MF) const override;
- void computeKnownBitsForTargetInstr(GISelKnownBits &Analysis, Register R,
+ void computeKnownBitsForTargetInstr(GISelValueTracking &Analysis, Register R,
KnownBits &Known,
const APInt &DemandedElts,
const MachineRegisterInfo &MRI,
unsigned Depth = 0) const override;
- Align computeKnownAlignForTargetInstr(GISelKnownBits &Analysis, Register R,
+ Align computeKnownAlignForTargetInstr(GISelValueTracking &Analysis,
+ Register R,
const MachineRegisterInfo &MRI,
unsigned Depth = 0) const override;
bool isSDNodeSourceOfDivergence(const SDNode *N, FunctionLoweringInfo *FLI,
diff --git a/llvm/lib/Target/Mips/MipsPostLegalizerCombiner.cpp b/llvm/lib/Target/Mips/MipsPostLegalizerCombiner.cpp
index 56d47007cb1b0..682d89018aec0 100644
--- a/llvm/lib/Target/Mips/MipsPostLegalizerCombiner.cpp
+++ b/llvm/lib/Target/Mips/MipsPostLegalizerCombiner.cpp
@@ -19,7 +19,7 @@
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/TargetPassConfig.h"
@@ -48,7 +48,7 @@ class MipsPostLegalizerCombinerImpl : public Combiner {
public:
MipsPostLegalizerCombinerImpl(
MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
- GISelKnownBits &KB, GISelCSEInfo *CSEInfo,
+ GISelValueTracking &VT, GISelCSEInfo *CSEInfo,
const MipsPostLegalizerCombinerImplRuleConfig &RuleConfig,
const MipsSubtarget &STI, MachineDominatorTree *MDT,
const LegalizerInfo *LI);
@@ -69,12 +69,12 @@ class MipsPostLegalizerCombinerImpl : public Combiner {
MipsPostLegalizerCombinerImpl::MipsPostLegalizerCombinerImpl(
MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
- GISelKnownBits &KB, GISelCSEInfo *CSEInfo,
+ GISelValueTracking &VT, GISelCSEInfo *CSEInfo,
const MipsPostLegalizerCombinerImplRuleConfig &RuleConfig,
const MipsSubtarget &STI, MachineDominatorTree *MDT,
const LegalizerInfo *LI)
- : Combiner(MF, CInfo, TPC, &KB, CSEInfo), RuleConfig(RuleConfig), STI(STI),
- Helper(Observer, B, /*IsPreLegalize*/ false, &KB, MDT, LI),
+ : Combiner(MF, CInfo, TPC, &VT, CSEInfo), RuleConfig(RuleConfig), STI(STI),
+ Helper(Observer, B, /*IsPreLegalize*/ false, &VT, MDT, LI),
#define GET_GICOMBINER_CONSTRUCTOR_INITS
#include "MipsGenPostLegalizeGICombiner.inc"
#undef GET_GICOMBINER_CONSTRUCTOR_INITS
@@ -106,8 +106,8 @@ void MipsPostLegalizerCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetPassConfig>();
AU.setPreservesCFG();
getSelectionDAGFallbackAnalysisUsage(AU);
- AU.addRequired<GISelKnownBitsAnalysis>();
- AU.addPreserved<GISelKnownBitsAnalysis>();
+ AU.addRequired<GISelValueTrackingAnalysis>();
+ AU.addPreserved<GISelValueTrackingAnalysis>();
if (!IsOptNone) {
AU.addRequired<MachineDominatorTreeWrapperPass>();
AU.addPreserved<MachineDominatorTreeWrapperPass>();
@@ -136,13 +136,13 @@ bool MipsPostLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
const MipsLegalizerInfo *LI =
static_cast<const MipsLegalizerInfo *>(ST.getLegalizerInfo());
- GISelKnownBits *KB = &getAnalysis<GISelKnownBitsAnalysis>().get(MF);
+ GISelValueTracking *VT = &getAnalysis<GISelValueTrackingAnalysis>().get(MF);
MachineDominatorTree *MDT =
IsOptNone ? nullptr
: &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
CombinerInfo CInfo(/*AllowIllegalOps*/ false, /*ShouldLegalizeIllegal*/ true,
LI, EnableOpt, F.hasOptSize(), F.hasMinSize());
- MipsPostLegalizerCombinerImpl Impl(MF, CInfo, TPC, *KB, /*CSEInfo*/ nullptr,
+ MipsPostLegalizerCombinerImpl Impl(MF, CInfo, TPC, *VT, /*CSEInfo*/ nullptr,
RuleConfig, ST, MDT, LI);
return Impl.combineMachineInstrs();
}
@@ -152,7 +152,7 @@ INITIALIZE_PASS_BEGIN(MipsPostLegalizerCombiner, DEBUG_TYPE,
"Combine Mips machine instrs after legalization", false,
false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
-INITIALIZE_PASS_DEPENDENCY(GISelKnownBitsAnalysis)
+INITIALIZE_PASS_DEPENDENCY(GISelValueTrackingAnalysis)
INITIALIZE_PASS_END(MipsPostLegalizerCombiner, DEBUG_TYPE,
"Combine Mips machine instrs after legalization", false,
false)
diff --git a/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp b/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
index 80c1a5eaa52dc..b94d61c9608bf 100644
--- a/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
@@ -16,7 +16,7 @@
#include "llvm/CodeGen/GlobalISel/Combiner.h"
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/InitializePasses.h"
@@ -41,12 +41,13 @@ class MipsPreLegalizerCombinerImpl : public Combiner {
public:
MipsPreLegalizerCombinerImpl(MachineFunction &MF, CombinerInfo &CInfo,
- const TargetPassConfig *TPC, GISelKnownBits &KB,
- GISelCSEInfo *CSEInfo, const MipsSubtarget &STI,
+ const TargetPassConfig *TPC,
+ GISelValueTracking &VT, GISelCSEInfo *CSEInfo,
+ const MipsSubtarget &STI,
MachineDominatorTree *MDT,
const LegalizerInfo *LI)
- : Combiner(MF, CInfo, TPC, &KB, CSEInfo), STI(STI),
- Helper(Observer, B, /*IsPreLegalize*/ true, &KB, MDT, LI) {}
+ : Combiner(MF, CInfo, TPC, &VT, CSEInfo), STI(STI),
+ Helper(Observer, B, /*IsPreLegalize*/ true, &VT, MDT, LI) {}
static const char *getName() { return "MipsPreLegalizerCombiner"; }
@@ -102,8 +103,8 @@ class MipsPreLegalizerCombiner : public MachineFunctionPass {
void MipsPreLegalizerCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetPassConfig>();
- AU.addRequired<GISelKnownBitsAnalysis>();
- AU.addPreserved<GISelKnownBitsAnalysis>();
+ AU.addRequired<GISelValueTrackingAnalysis>();
+ AU.addPreserved<GISelValueTrackingAnalysis>();
AU.setPreservesCFG();
getSelectionDAGFallbackAnalysisUsage(AU);
MachineFunctionPass::getAnalysisUsage(AU);
@@ -123,9 +124,9 @@ bool MipsPreLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
const MipsLegalizerInfo *LI =
static_cast<const MipsLegalizerInfo *>(ST.getLegalizerInfo());
- GISelKnownBits *KB = &getAnalysis<GISelKnownBitsAnalysis>().get(MF);
+ GISelValueTracking *VT = &getAnalysis<GISelValueTrackingAnalysis>().get(MF);
MipsPreLegalizerCombinerInfo PCInfo;
- MipsPreLegalizerCombinerImpl Impl(MF, PCInfo, TPC, *KB, /*CSEInfo*/ nullptr,
+ MipsPreLegalizerCombinerImpl Impl(MF, PCInfo, TPC, *VT, /*CSEInfo*/ nullptr,
ST, /*MDT*/ nullptr, LI);
return Impl.combineMachineInstrs();
}
@@ -135,7 +136,7 @@ INITIALIZE_PASS_BEGIN(MipsPreLegalizerCombiner, DEBUG_TYPE,
"Combine Mips machine instrs before legalization", false,
false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
-INITIALIZE_PASS_DEPENDENCY(GISelKnownBitsAnalysis)
+INITIALIZE_PASS_DEPENDENCY(GISelValueTrackingAnalysis)
INITIALIZE_PASS_END(MipsPreLegalizerCombiner, DEBUG_TYPE,
"Combine Mips machine instrs before legalization", false,
false)
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index bf38af921d074..18ce5407f816c 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -16,7 +16,7 @@
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
@@ -44,10 +44,10 @@ class RISCVInstructionSelector : public InstructionSelector {
bool select(MachineInstr &MI) override;
- void setupMF(MachineFunction &MF, GISelKnownBits *KB,
+ void setupMF(MachineFunction &MF, GISelValueTracking *VT,
CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
BlockFrequencyInfo *BFI) override {
- InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
+ InstructionSelector::setupMF(MF, VT, CoverageInfo, PSI, BFI);
MRI = &MF.getRegInfo();
}
@@ -305,7 +305,7 @@ RISCVInstructionSelector::selectShiftMask(MachineOperand &Root,
} else {
// SimplifyDemandedBits may have optimized the mask so try restoring any
// bits that are known zero.
- KnownBits Known = KB->getKnownBits(AndSrcReg);
+ KnownBits Known = VT->getKnownBits(AndSrcReg);
if (ShMask.isSubsetOf(AndMask | Known.Zero))
ShAmtReg = AndSrcReg;
}
@@ -361,7 +361,7 @@ RISCVInstructionSelector::selectSExtBits(MachineOperand &Root,
}
unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
- if ((Size - KB->computeNumSignBits(RootReg)) < Bits)
+ if ((Size - VT->computeNumSignBits(RootReg)) < Bits)
return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
return std::nullopt;
@@ -385,7 +385,7 @@ RISCVInstructionSelector::selectZExtBits(MachineOperand &Root,
return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
- if (KB->maskedValueIsZero(RootReg, APInt::getBitsSetFrom(Size, Bits)))
+ if (VT->maskedValueIsZero(RootReg, APInt::getBitsSetFrom(Size, Bits)))
return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
return std::nullopt;
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVO0PreLegalizerCombiner.cpp b/llvm/lib/Target/RISCV/GISel/RISCVO0PreLegalizerCombiner.cpp
index aa44c0c15bb94..b3f27ea95b79c 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVO0PreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVO0PreLegalizerCombiner.cpp
@@ -16,7 +16,7 @@
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -45,7 +45,7 @@ class RISCVO0PreLegalizerCombinerImpl : public Combiner {
public:
RISCVO0PreLegalizerCombinerImpl(
MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
- GISelKnownBits &KB, GISelCSEInfo *CSEInfo,
+ GISelValueTracking &VT, GISelCSEInfo *CSEInfo,
const RISCVO0PreLegalizerCombinerImplRuleConfig &RuleConfig,
const RISCVSubtarget &STI);
@@ -65,11 +65,11 @@ class RISCVO0PreLegalizerCombinerImpl : public Combiner {
RISCVO0PreLegalizerCombinerImpl::RISCVO0PreLegalizerCombinerImpl(
MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
- GISelKnownBits &KB, GISelCSEInfo *CSEInfo,
+ GISelValueTracking &VT, GISelCSEInfo *CSEInfo,
const RISCVO0PreLegalizerCombinerImplRuleConfig &RuleConfig,
const RISCVSubtarget &STI)
- : Combiner(MF, CInfo, TPC, &KB, CSEInfo),
- Helper(Observer, B, /*IsPreLegalize*/ true, &KB), RuleConfig(RuleConfig),
+ : Combiner(MF, CInfo, TPC, &VT, CSEInfo),
+ Helper(Observer, B, /*IsPreLegalize*/ true, &VT), RuleConfig(RuleConfig),
STI(STI),
#define GET_GICOMBINER_CONSTRUCTOR_INITS
#include "RISCVGenO0PreLegalizeGICombiner.inc"
@@ -103,8 +103,8 @@ void RISCVO0PreLegalizerCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetPassConfig>();
AU.setPreservesCFG();
getSelectionDAGFallbackAnalysisUsage(AU);
- AU.addRequired<GISelKnownBitsAnalysis>();
- AU.addPreserved<GISelKnownBitsAnalysis>();
+ AU.addRequired<GISelValueTrackingAnalysis>();
+ AU.addPreserved<GISelValueTrackingAnalysis>();
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -123,7 +123,7 @@ bool RISCVO0PreLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
auto &TPC = getAnalysis<TargetPassConfig>();
const Function &F = MF.getFunction();
- GISelKnownBits *KB = &getAnalysis<GISelKnownBitsAnalysis>().get(MF);
+ GISelValueTracking *VT = &getAnalysis<GISelValueTrackingAnalysis>().get(MF);
const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
@@ -134,7 +134,7 @@ bool RISCVO0PreLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
// at the cost of possibly missing optimizations. See PR#94291 for details.
CInfo.MaxIterations = 1;
- RISCVO0PreLegalizerCombinerImpl Impl(MF, CInfo, &TPC, *KB,
+ RISCVO0PreLegalizerCombinerImpl Impl(MF, CInfo, &TPC, *VT,
/*CSEInfo*/ nullptr, RuleConfig, ST);
return Impl.combineMachineInstrs();
}
@@ -144,7 +144,7 @@ INITIALIZE_PASS_BEGIN(RISCVO0PreLegalizerCombiner, DEBUG_TYPE,
"Combine RISC-V machine instrs before legalization",
false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
-INITIALIZE_PASS_DEPENDENCY(GISelKnownBitsAnalysis)
+INITIALIZE_PASS_DEPENDENCY(GISelValueTrackingAnalysis)
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
INITIALIZE_PASS_END(RISCVO0PreLegalizerCombiner, DEBUG_TYPE,
"Combine RISC-V machine instrs before legalization", false,
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVPostLegalizerCombiner.cpp b/llvm/lib/Target/RISCV/GISel/RISCVPostLegalizerCombiner.cpp
index c558ed66f3a15..29136d8b8bf04 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVPostLegalizerCombiner.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVPostLegalizerCombiner.cpp
@@ -22,7 +22,7 @@
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -51,7 +51,7 @@ class RISCVPostLegalizerCombinerImpl : public Combiner {
public:
RISCVPostLegalizerCombinerImpl(
MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
- GISelKnownBits &KB, GISelCSEInfo *CSEInfo,
+ GISelValueTracking &VT, GISelCSEInfo *CSEInfo,
const RISCVPostLegalizerCombinerImplRuleConfig &RuleConfig,
const RISCVSubtarget &STI, MachineDominatorTree *MDT,
const LegalizerInfo *LI);
@@ -72,12 +72,12 @@ class RISCVPostLegalizerCombinerImpl : public Combiner {
RISCVPostLegalizerCombinerImpl::RISCVPostLegalizerCombinerImpl(
MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
- GISelKnownBits &KB, GISelCSEInfo *CSEInfo,
+ GISelValueTracking &VT, GISelCSEInfo *CSEInfo,
const RISCVPostLegalizerCombinerImplRuleConfig &RuleConfig,
const RISCVSubtarget &STI, MachineDominatorTree *MDT,
const LegalizerInfo *LI)
- : Combiner(MF, CInfo, TPC, &KB, CSEInfo),
- Helper(Observer, B, /*IsPreLegalize*/ false, &KB, MDT, LI),
+ : Combiner(MF, CInfo, TPC, &VT, CSEInfo),
+ Helper(Observer, B, /*IsPreLegalize*/ false, &VT, MDT, LI),
RuleConfig(RuleConfig), STI(STI),
#define GET_GICOMBINER_CONSTRUCTOR_INITS
#include "RISCVGenPostLegalizeGICombiner.inc"
@@ -107,8 +107,8 @@ void RISCVPostLegalizerCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetPassConfig>();
AU.setPreservesCFG();
getSelectionDAGFallbackAnalysisUsage(AU);
- AU.addRequired<GISelKnownBitsAnalysis>();
- AU.addPreserved<GISelKnownBitsAnalysis>();
+ AU.addRequired<GISelValueTrackingAnalysis>();
+ AU.addPreserved<GISelValueTrackingAnalysis>();
AU.addRequired<MachineDominatorTreeWrapperPass>();
AU.addPreserved<MachineDominatorTreeWrapperPass>();
AU.addRequired<GISelCSEAnalysisWrapperPass>();
@@ -139,7 +139,7 @@ bool RISCVPostLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
const auto *LI = ST.getLegalizerInfo();
- GISelKnownBits *KB = &getAnalysis<GISelKnownBitsAnalysis>().get(MF);
+ GISelValueTracking *VT = &getAnalysis<GISelValueTrackingAnalysis>().get(MF);
MachineDominatorTree *MDT =
&getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
GISelCSEAnalysisWrapper &Wrapper =
@@ -149,8 +149,8 @@ bool RISCVPostLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
CombinerInfo CInfo(/*AllowIllegalOps*/ true, /*ShouldLegalizeIllegal*/ false,
/*LegalizerInfo*/ nullptr, EnableOpt, F.hasOptSize(),
F.hasMinSize());
- RISCVPostLegalizerCombinerImpl Impl(MF, CInfo, TPC, *KB, CSEInfo,
- RuleConfig, ST, MDT, LI);
+ RISCVPostLegalizerCombinerImpl Impl(MF, CInfo, TPC, *VT, CSEInfo, RuleConfig,
+ ST, MDT, LI);
return Impl.combineMachineInstrs();
}
@@ -159,7 +159,7 @@ INITIALIZE_PASS_BEGIN(RISCVPostLegalizerCombiner, DEBUG_TYPE,
"Combine RISC-V MachineInstrs after legalization", false,
false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
-INITIALIZE_PASS_DEPENDENCY(GISelKnownBitsAnalysis)
+INITIALIZE_PASS_DEPENDENCY(GISelValueTrackingAnalysis)
INITIALIZE_PASS_END(RISCVPostLegalizerCombiner, DEBUG_TYPE,
"Combine RISC-V MachineInstrs after legalization", false,
false)
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVPreLegalizerCombiner.cpp b/llvm/lib/Target/RISCV/GISel/RISCVPreLegalizerCombiner.cpp
index efcb24706886e..0c5a09a925bb6 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVPreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVPreLegalizerCombiner.cpp
@@ -17,7 +17,7 @@
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -47,7 +47,7 @@ class RISCVPreLegalizerCombinerImpl : public Combiner {
public:
RISCVPreLegalizerCombinerImpl(
MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
- GISelKnownBits &KB, GISelCSEInfo *CSEInfo,
+ GISelValueTracking &VT, GISelCSEInfo *CSEInfo,
const RISCVPreLegalizerCombinerImplRuleConfig &RuleConfig,
const RISCVSubtarget &STI, MachineDominatorTree *MDT,
const LegalizerInfo *LI);
@@ -68,12 +68,12 @@ class RISCVPreLegalizerCombinerImpl : public Combiner {
RISCVPreLegalizerCombinerImpl::RISCVPreLegalizerCombinerImpl(
MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
- GISelKnownBits &KB, GISelCSEInfo *CSEInfo,
+ GISelValueTracking &VT, GISelCSEInfo *CSEInfo,
const RISCVPreLegalizerCombinerImplRuleConfig &RuleConfig,
const RISCVSubtarget &STI, MachineDominatorTree *MDT,
const LegalizerInfo *LI)
- : Combiner(MF, CInfo, TPC, &KB, CSEInfo),
- Helper(Observer, B, /*IsPreLegalize*/ true, &KB, MDT, LI),
+ : Combiner(MF, CInfo, TPC, &VT, CSEInfo),
+ Helper(Observer, B, /*IsPreLegalize*/ true, &VT, MDT, LI),
RuleConfig(RuleConfig), STI(STI),
#define GET_GICOMBINER_CONSTRUCTOR_INITS
#include "RISCVGenPreLegalizeGICombiner.inc"
@@ -105,8 +105,8 @@ void RISCVPreLegalizerCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetPassConfig>();
AU.setPreservesCFG();
getSelectionDAGFallbackAnalysisUsage(AU);
- AU.addRequired<GISelKnownBitsAnalysis>();
- AU.addPreserved<GISelKnownBitsAnalysis>();
+ AU.addRequired<GISelValueTrackingAnalysis>();
+ AU.addPreserved<GISelValueTrackingAnalysis>();
AU.addRequired<MachineDominatorTreeWrapperPass>();
AU.addPreserved<MachineDominatorTreeWrapperPass>();
AU.addRequired<GISelCSEAnalysisWrapperPass>();
@@ -139,7 +139,7 @@ bool RISCVPreLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
const Function &F = MF.getFunction();
bool EnableOpt =
MF.getTarget().getOptLevel() != CodeGenOptLevel::None && !skipFunction(F);
- GISelKnownBits *KB = &getAnalysis<GISelKnownBitsAnalysis>().get(MF);
+ GISelValueTracking *VT = &getAnalysis<GISelValueTrackingAnalysis>().get(MF);
MachineDominatorTree *MDT =
&getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
CombinerInfo CInfo(/*AllowIllegalOps*/ true, /*ShouldLegalizeIllegal*/ false,
@@ -151,7 +151,7 @@ bool RISCVPreLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
// This is the first Combiner, so the input IR might contain dead
// instructions.
CInfo.EnableFullDCE = true;
- RISCVPreLegalizerCombinerImpl Impl(MF, CInfo, &TPC, *KB, CSEInfo, RuleConfig,
+ RISCVPreLegalizerCombinerImpl Impl(MF, CInfo, &TPC, *VT, CSEInfo, RuleConfig,
ST, MDT, LI);
return Impl.combineMachineInstrs();
}
@@ -161,7 +161,7 @@ INITIALIZE_PASS_BEGIN(RISCVPreLegalizerCombiner, DEBUG_TYPE,
"Combine RISC-V machine instrs before legalization", false,
false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
-INITIALIZE_PASS_DEPENDENCY(GISelKnownBitsAnalysis)
+INITIALIZE_PASS_DEPENDENCY(GISelValueTrackingAnalysis)
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
INITIALIZE_PASS_END(RISCVPreLegalizerCombiner, DEBUG_TYPE,
"Combine RISC-V machine instrs before legalization", false,
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
index fb37f91af254f..424f23dada226 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
@@ -79,7 +79,7 @@ class SPIRVInstructionSelector : public InstructionSelector {
SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
const SPIRVSubtarget &ST,
const RegisterBankInfo &RBI);
- void setupMF(MachineFunction &MF, GISelKnownBits *KB,
+ void setupMF(MachineFunction &MF, GISelValueTracking *VT,
CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
BlockFrequencyInfo *BFI) override;
// Common selection code. Instruction-specific selection occurs in spvSelect.
@@ -363,13 +363,14 @@ SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
{
}
-void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
+void SPIRVInstructionSelector::setupMF(MachineFunction &MF,
+ GISelValueTracking *VT,
CodeGenCoverage *CoverageInfo,
ProfileSummaryInfo *PSI,
BlockFrequencyInfo *BFI) {
MRI = &MF.getRegInfo();
GR.setCurrentFunc(MF);
- InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
+ InstructionSelector::setupMF(MF, VT, CoverageInfo, PSI, BFI);
}
// Ensure that register classes correspond to pattern matching rules.
diff --git a/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp b/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
index 747c18b092954..3a68def3df058 100644
--- a/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
@@ -18,7 +18,7 @@
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
@@ -41,7 +41,7 @@ class SPIRVPreLegalizer : public MachineFunctionPass {
} // namespace
void SPIRVPreLegalizer::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addPreserved<GISelKnownBitsAnalysis>();
+ AU.addPreserved<GISelValueTrackingAnalysis>();
MachineFunctionPass::getAnalysisUsage(AU);
}
diff --git a/llvm/lib/Target/SPIRV/SPIRVPreLegalizerCombiner.cpp b/llvm/lib/Target/SPIRV/SPIRVPreLegalizerCombiner.cpp
index 71f08aedd7777..ec688762ca0a5 100644
--- a/llvm/lib/Target/SPIRV/SPIRVPreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVPreLegalizerCombiner.cpp
@@ -22,7 +22,7 @@
#include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
@@ -123,7 +123,7 @@ class SPIRVPreLegalizerCombinerImpl : public Combiner {
public:
SPIRVPreLegalizerCombinerImpl(
MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
- GISelKnownBits &KB, GISelCSEInfo *CSEInfo,
+ GISelValueTracking &VT, GISelCSEInfo *CSEInfo,
const SPIRVPreLegalizerCombinerImplRuleConfig &RuleConfig,
const SPIRVSubtarget &STI, MachineDominatorTree *MDT,
const LegalizerInfo *LI);
@@ -146,12 +146,12 @@ class SPIRVPreLegalizerCombinerImpl : public Combiner {
SPIRVPreLegalizerCombinerImpl::SPIRVPreLegalizerCombinerImpl(
MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
- GISelKnownBits &KB, GISelCSEInfo *CSEInfo,
+ GISelValueTracking &VT, GISelCSEInfo *CSEInfo,
const SPIRVPreLegalizerCombinerImplRuleConfig &RuleConfig,
const SPIRVSubtarget &STI, MachineDominatorTree *MDT,
const LegalizerInfo *LI)
- : Combiner(MF, CInfo, TPC, &KB, CSEInfo),
- Helper(Observer, B, /*IsPreLegalize*/ true, &KB, MDT, LI),
+ : Combiner(MF, CInfo, TPC, &VT, CSEInfo),
+ Helper(Observer, B, /*IsPreLegalize*/ true, &VT, MDT, LI),
RuleConfig(RuleConfig), STI(STI),
#define GET_GICOMBINER_CONSTRUCTOR_INITS
#include "SPIRVGenPreLegalizeGICombiner.inc"
@@ -188,8 +188,8 @@ void SPIRVPreLegalizerCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetPassConfig>();
AU.setPreservesCFG();
getSelectionDAGFallbackAnalysisUsage(AU);
- AU.addRequired<GISelKnownBitsAnalysis>();
- AU.addPreserved<GISelKnownBitsAnalysis>();
+ AU.addRequired<GISelValueTrackingAnalysis>();
+ AU.addPreserved<GISelValueTrackingAnalysis>();
AU.addRequired<MachineDominatorTreeWrapperPass>();
AU.addPreserved<MachineDominatorTreeWrapperPass>();
MachineFunctionPass::getAnalysisUsage(AU);
@@ -215,7 +215,7 @@ bool SPIRVPreLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
const Function &F = MF.getFunction();
bool EnableOpt =
MF.getTarget().getOptLevel() != CodeGenOptLevel::None && !skipFunction(F);
- GISelKnownBits *KB = &getAnalysis<GISelKnownBitsAnalysis>().get(MF);
+ GISelValueTracking *VT = &getAnalysis<GISelValueTrackingAnalysis>().get(MF);
MachineDominatorTree *MDT =
&getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
CombinerInfo CInfo(/*AllowIllegalOps*/ true, /*ShouldLegalizeIllegal*/ false,
@@ -227,7 +227,7 @@ bool SPIRVPreLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
// This is the first Combiner, so the input IR might contain dead
// instructions.
CInfo.EnableFullDCE = false;
- SPIRVPreLegalizerCombinerImpl Impl(MF, CInfo, &TPC, *KB, /*CSEInfo*/ nullptr,
+ SPIRVPreLegalizerCombinerImpl Impl(MF, CInfo, &TPC, *VT, /*CSEInfo*/ nullptr,
RuleConfig, ST, MDT, LI);
return Impl.combineMachineInstrs();
}
@@ -237,7 +237,7 @@ INITIALIZE_PASS_BEGIN(SPIRVPreLegalizerCombiner, DEBUG_TYPE,
"Combine SPIRV machine instrs before legalization", false,
false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
-INITIALIZE_PASS_DEPENDENCY(GISelKnownBitsAnalysis)
+INITIALIZE_PASS_DEPENDENCY(GISelValueTrackingAnalysis)
INITIALIZE_PASS_END(SPIRVPreLegalizerCombiner, DEBUG_TYPE,
"Combine SPIRV machine instrs before legalization", false,
false)
diff --git a/llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp b/llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp
index 34a36ba68d7c0..de29cbcd29476 100644
--- a/llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
#include "GISelMITest.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
TEST_F(AArch64GISelMITest, TestKnownBitsCst) {
@@ -20,7 +20,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsCst) {
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
unsigned SrcReg = FinalCopy->getOperand(1).getReg();
unsigned DstReg = FinalCopy->getOperand(0).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
EXPECT_EQ((uint64_t)1, Res.One.getZExtValue());
EXPECT_EQ((uint64_t)0xfe, Res.Zero.getZExtValue());
@@ -40,7 +40,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsCstWithClass) {
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
unsigned SrcReg = FinalCopy->getOperand(1).getReg();
unsigned DstReg = FinalCopy->getOperand(0).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
// We can't analyze %3 due to the register class constraint. We will get a
// default-constructed KnownBits back.
@@ -79,7 +79,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsCstPHI) {
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
Register DstReg = FinalCopy->getOperand(0).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
EXPECT_EQ((uint64_t)2, Res.One.getZExtValue());
EXPECT_EQ((uint64_t)0xfc, Res.Zero.getZExtValue());
@@ -113,7 +113,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsCstPHIToNonGenericReg) {
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
Register DstReg = FinalCopy->getOperand(0).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
EXPECT_EQ((uint64_t)0, Res.One.getZExtValue());
EXPECT_EQ((uint64_t)0, Res.Zero.getZExtValue());
@@ -151,7 +151,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsUnknownPHI) {
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
Register DstReg = FinalCopy->getOperand(0).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
EXPECT_EQ((uint64_t)0, Res.One.getZExtValue());
EXPECT_EQ((uint64_t)0, Res.Zero.getZExtValue());
@@ -188,7 +188,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsCstPHIWithLoop) {
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
Register DstReg = FinalCopy->getOperand(0).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
EXPECT_EQ((uint64_t)0, Res.One.getZExtValue());
EXPECT_EQ((uint64_t)0, Res.Zero.getZExtValue());
@@ -227,7 +227,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsDecreasingCstPHIWithLoop) {
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
Register DstReg = FinalCopy->getOperand(0).getReg();
- GISelKnownBits Info(*MF, /*MaxDepth=*/24);
+ GISelValueTracking Info(*MF, /*MaxDepth=*/24);
KnownBits Res = Info.getKnownBits(SrcReg);
EXPECT_EQ((uint64_t)0, Res.One.getZExtValue());
// A single iteration on the PHI (%13) gives:
@@ -254,7 +254,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsPtrToIntViceVersa) {
unsigned CopyReg = Copies[Copies.size() - 1];
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
unsigned SrcReg = FinalCopy->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
EXPECT_EQ(256u, Res.One.getZExtValue());
EXPECT_EQ(0xfffffeffu, Res.Zero.getZExtValue());
@@ -283,7 +283,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsAND) {
Register CopyReg = Copies[Copies.size() - 1];
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
// 00??1?10
// & 00?11000
@@ -315,7 +315,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsOR) {
Register CopyReg = Copies[Copies.size() - 1];
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
// 00??1?10
// | 00?11000
@@ -347,7 +347,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsXOR) {
Register CopyReg = Copies[Copies.size() - 1];
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
// Xor KnowBits does not track if we are doing xor of unknown bit with itself
// or negated itself.
@@ -369,7 +369,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsXORConstant) {
unsigned CopyReg = Copies[Copies.size() - 1];
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
unsigned SrcReg = FinalCopy->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
EXPECT_EQ(3u, Res.One.getZExtValue());
EXPECT_EQ(252u, Res.Zero.getZExtValue());
@@ -402,7 +402,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsASHR) {
Register CopyReg0 = Copies[Copies.size() - 2];
MachineInstr *FinalCopy0 = MRI->getVRegDef(CopyReg0);
Register SrcReg0 = FinalCopy0->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res0 = Info.getKnownBits(SrcReg0);
// 11?01??0 >> 2
// = 1111?01?
@@ -446,7 +446,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsLSHR) {
Register CopyReg0 = Copies[Copies.size() - 2];
MachineInstr *FinalCopy0 = MRI->getVRegDef(CopyReg0);
Register SrcReg0 = FinalCopy0->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res0 = Info.getKnownBits(SrcReg0);
// 11?01??0 >> 2
// = 0011?01?
@@ -483,7 +483,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsSHL) {
Register CopyReg = Copies[Copies.size() - 1];
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
// 01??10?? << 3
// = ?10??000
@@ -514,7 +514,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsADD) {
Register CopyReg = Copies[Copies.size() - 1];
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
// Add KnowBits works out known carry bits first and then calculates result.
// 001?01?101?000?0
@@ -547,7 +547,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsSUB) {
Register CopyReg = Copies[Copies.size() - 1];
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
// Sub KnowBits for LHS - RHS use Add KnownBits for LHS + ~RHS + 1.
EXPECT_EQ(0x01CDu, Res.One.getZExtValue());
@@ -574,7 +574,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsMUL) {
Register CopyReg = Copies[Copies.size() - 1];
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
// Mul KnowBits are conservatively correct, but not guaranteed to be precise.
// Precise for trailing bits up to the first unknown bit.
@@ -602,7 +602,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsICMP) {
Register CopyReg = Copies[Copies.size() - 1];
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
// For targets that use 0 or 1 as icmp result in large register set high bits
// to 0, does not analyze operands/compare predicate.
@@ -625,7 +625,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsFCMP) {
Register CopyReg = Copies[Copies.size() - 1];
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
// For targets that use 0 or 1 as fcmp result in large register set high bits
// to 0, does not analyze operands/compare predicate.
@@ -657,7 +657,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsSelect) {
Register CopyReg = Copies[Copies.size() - 1];
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
// Select KnownBits takes common bits of LHS and RHS, does not analyze
// condition operand.
@@ -689,7 +689,7 @@ TEST_F(AArch64GISelMITest, TestKnownBits) {
unsigned CopyReg = Copies[Copies.size() - 1];
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
unsigned SrcReg = FinalCopy->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Known = Info.getKnownBits(SrcReg);
EXPECT_FALSE(Known.hasConflict());
EXPECT_EQ(32u, Known.One.getZExtValue());
@@ -707,7 +707,7 @@ TEST_F(AArch64GISelMITest, TestSignBitIsZero) {
auto SignBit = B.buildConstant(S32, 0x80000000);
auto Zero = B.buildConstant(S32, 0);
- GISelKnownBits KnownBits(*MF);
+ GISelValueTracking KnownBits(*MF);
EXPECT_TRUE(KnownBits.signBitIsZero(Zero.getReg(0)));
EXPECT_FALSE(KnownBits.signBitIsZero(SignBit.getReg(0)));
@@ -737,7 +737,7 @@ TEST_F(AArch64GISelMITest, TestNumSignBitsConstant) {
Register CopyReg32 = Copies[Copies.size() - 2];
Register CopyRegNeg32 = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
EXPECT_EQ(7u, Info.computeNumSignBits(CopyReg1));
EXPECT_EQ(8u, Info.computeNumSignBits(CopyRegNeg1));
EXPECT_EQ(1u, Info.computeNumSignBits(CopyReg127));
@@ -775,7 +775,7 @@ TEST_F(AArch64GISelMITest, TestNumSignBitsXOR) {
Register Copy4 = Copies[Copies.size() - 2];
Register Copy5 = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
EXPECT_EQ(7u, Info.computeNumSignBits(Copy1));
EXPECT_EQ(2u, Info.computeNumSignBits(Copy2));
EXPECT_EQ(1u, Info.computeNumSignBits(Copy3));
@@ -813,7 +813,7 @@ TEST_F(AArch64GISelMITest, TestNumSignBitsOR) {
Register Copy4 = Copies[Copies.size() - 2];
Register Copy5 = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
EXPECT_EQ(8u, Info.computeNumSignBits(Copy1));
EXPECT_EQ(2u, Info.computeNumSignBits(Copy2));
EXPECT_EQ(1u, Info.computeNumSignBits(Copy3));
@@ -851,7 +851,7 @@ TEST_F(AArch64GISelMITest, TestNumSignBitsAND) {
Register Copy4 = Copies[Copies.size() - 2];
Register Copy5 = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
EXPECT_EQ(7u, Info.computeNumSignBits(Copy1));
EXPECT_EQ(8u, Info.computeNumSignBits(Copy2));
EXPECT_EQ(2u, Info.computeNumSignBits(Copy3));
@@ -874,7 +874,7 @@ TEST_F(AArch64GISelMITest, TestNumSignBitsSext) {
Register CopySextLoad = Copies[Copies.size() - 2];
Register CopySextNeg1 = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
EXPECT_EQ(25u, Info.computeNumSignBits(CopySextLoad));
EXPECT_EQ(32u, Info.computeNumSignBits(CopySextNeg1));
}
@@ -930,7 +930,7 @@ TEST_F(AArch64GISelMITest, TestNumSignBitsSextInReg) {
Register CopyInReg9Sext = Copies[Copies.size() - 2];
Register CopyInReg31Sext = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
EXPECT_EQ(26u, Info.computeNumSignBits(CopyInReg7));
EXPECT_EQ(25u, Info.computeNumSignBits(CopyInReg8));
EXPECT_EQ(24u, Info.computeNumSignBits(CopyInReg9));
@@ -998,7 +998,7 @@ TEST_F(AArch64GISelMITest, TestNumSignBitsAssertSext) {
Register CopyInReg9Sext = Copies[Copies.size() - 2];
Register CopyInReg31Sext = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
EXPECT_EQ(32u, Info.computeNumSignBits(CopyInReg1));
EXPECT_EQ(26u, Info.computeNumSignBits(CopyInReg7));
EXPECT_EQ(25u, Info.computeNumSignBits(CopyInReg8));
@@ -1032,7 +1032,7 @@ TEST_F(AArch64GISelMITest, TestNumSignBitsTrunc) {
Register CopyTruncNeg1 = Copies[Copies.size() - 2];
Register CopyTrunc7 = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
EXPECT_EQ(1u, Info.computeNumSignBits(CopyTruncLoad));
EXPECT_EQ(8u, Info.computeNumSignBits(CopyTruncNeg1));
EXPECT_EQ(5u, Info.computeNumSignBits(CopyTrunc7));
@@ -1061,7 +1061,7 @@ TEST_F(AArch64GISelMITest, TestNumSignBitsCmp) {
Register CopyScalarFCMP = Copies[Copies.size() - 2];
Register CopyScalarICMP = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
EXPECT_EQ(32u, Info.computeNumSignBits(CopyVecFCMP));
EXPECT_EQ(32u, Info.computeNumSignBits(CopyVecICMP));
EXPECT_EQ(31u, Info.computeNumSignBits(CopyScalarFCMP));
@@ -1093,7 +1093,7 @@ TEST_F(AMDGPUGISelMITest, TestNumSignBitsTrunc) {
Register CopyLoadUShort = Copies[Copies.size() - 2];
Register CopyLoadSShort = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
EXPECT_EQ(24u, Info.computeNumSignBits(CopyLoadUByte));
EXPECT_EQ(25u, Info.computeNumSignBits(CopyLoadSByte));
@@ -1124,7 +1124,7 @@ TEST_F(AMDGPUGISelMITest, TestTargetKnownAlign) {
Register CopyImplicitArgPtr = Copies[Copies.size() - 2];
Register CopyImplicitBufferPtr = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
EXPECT_EQ(Align(4), Info.computeKnownAlignment(CopyDispatchPtr));
EXPECT_EQ(Align(4), Info.computeKnownAlignment(CopyQueuePtr));
@@ -1180,7 +1180,7 @@ TEST_F(AMDGPUGISelMITest, TestIsKnownToBeAPowerOfTwo) {
if (!TM)
GTEST_SKIP();
- GISelKnownBits KB(*MF);
+ GISelValueTracking VT(*MF);
Register CopyZero = Copies[Copies.size() - 12];
Register CopyOne = Copies[Copies.size() - 11];
@@ -1197,21 +1197,21 @@ TEST_F(AMDGPUGISelMITest, TestIsKnownToBeAPowerOfTwo) {
Register CopyLShrSignMask = Copies[Copies.size() - 2];
Register CopyOrPow2 = Copies[Copies.size() - 1];
- EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyZero, *MRI, &KB));
- EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyOne, *MRI, &KB));
- EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyTwo, *MRI, &KB));
- EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyThree, *MRI, &KB));
+ EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyZero, *MRI, &VT));
+ EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyOne, *MRI, &VT));
+ EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyTwo, *MRI, &VT));
+ EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyThree, *MRI, &VT));
- EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyTruncTwo, *MRI, &KB));
- EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyTruncThree, *MRI, &KB));
- EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyTruncFive, *MRI, &KB));
+ EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyTruncTwo, *MRI, &VT));
+ EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyTruncThree, *MRI, &VT));
+ EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyTruncFive, *MRI, &VT));
- EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyShl1, *MRI, &KB));
- EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyShl2, *MRI, &KB));
+ EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyShl1, *MRI, &VT));
+ EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyShl2, *MRI, &VT));
- EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyLShrNotSignMask, *MRI, &KB));
- EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyLShrSignMask, *MRI, &KB));
- EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyOrPow2, *MRI, &KB));
+ EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyLShrNotSignMask, *MRI, &VT));
+ EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyLShrSignMask, *MRI, &VT));
+ EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyOrPow2, *MRI, &VT));
}
static void AddRangeMetadata(LLVMContext &Context, MachineInstr *Load) {
@@ -1255,7 +1255,7 @@ TEST_F(AArch64GISelMITest, TestMetadata) {
MachineInstr *Load = MRI->getVRegDef(Ext->getOperand(1).getReg());
AddRangeMetadata(Context, Load);
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(And->getOperand(1).getReg());
// We don't know what the result of the load is, so we don't know any ones.
@@ -1282,7 +1282,7 @@ TEST_F(AArch64GISelMITest, TestMetadataExt) {
MachineInstr *Load = MRI->getVRegDef(SrcReg);
AddRangeMetadata(Context, Load);
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
EXPECT_TRUE(Res.One.isZero());
EXPECT_EQ(Res.Zero.getZExtValue(), 0xfeu);
@@ -1302,7 +1302,7 @@ TEST_F(AArch64GISelMITest, TestMetadataZExt) {
MachineInstr *Load = MRI->getVRegDef(SrcReg);
AddRangeMetadata(Context, Load);
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
EXPECT_TRUE(Res.One.isZero());
EXPECT_EQ(Res.Zero.getZExtValue(), 0xfffffffe);
@@ -1322,7 +1322,7 @@ TEST_F(AArch64GISelMITest, TestMetadataSExt) {
MachineInstr *Load = MRI->getVRegDef(SrcReg);
AddRangeMetadata(Context, Load);
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
EXPECT_TRUE(Res.One.isZero());
EXPECT_EQ(Res.Zero.getZExtValue(), 0xfffffffe);
@@ -1345,7 +1345,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsExt) {
Register CopyRegZ = Copies[Copies.size() - 2];
Register CopyRegS = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
MachineInstr *Copy;
Register SrcReg;
KnownBits Res;
@@ -1411,7 +1411,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsSextInReg) {
setUp(MIRString);
if (!TM)
GTEST_SKIP();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res;
auto GetKB = [&](unsigned Idx) {
Register CopyReg = Copies[Idx];
@@ -1482,7 +1482,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsAssertSext) {
setUp(MIRString);
if (!TM)
GTEST_SKIP();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res;
auto GetKB = [&](unsigned Idx) {
Register CopyReg = Copies[Idx];
@@ -1531,7 +1531,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsMergeValues) {
const uint64_t TestVal = UINT64_C(0xabcd123344568998);
Register CopyMerge = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(CopyMerge);
EXPECT_EQ(64u, Res.getBitWidth());
EXPECT_EQ(TestVal, Res.One.getZExtValue());
@@ -1553,7 +1553,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsUnmergeValues) {
GTEST_SKIP();
const uint64_t TestVal = UINT64_C(0xabcd123344568998);
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
int Offset = -4;
for (unsigned BitOffset = 0; BitOffset != 64; BitOffset += 16, ++Offset) {
@@ -1585,7 +1585,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsBSwapBitReverse) {
Register CopyBSwap = Copies[Copies.size() - 2];
Register CopyBitReverse = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits BSwapKnown = Info.getKnownBits(CopyBSwap);
EXPECT_EQ(32u, BSwapKnown.getBitWidth());
@@ -1632,7 +1632,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsUMAX) {
Register CopyReg0 = Copies[Copies.size() - 2];
MachineInstr *FinalCopy0 = MRI->getVRegDef(CopyReg0);
Register SrcReg0 = FinalCopy0->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
// Compares min/max of LHS and RHS, min uses 0 for unknown bits, max uses 1.
// If min(LHS) >= max(RHS) returns KnownBits for LHS, similar for RHS. If this
// fails tries to calculate individual bits: common bits for both operands and
@@ -1668,7 +1668,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsUMax) {
GTEST_SKIP();
Register CopyUMax = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits KnownUmax = Info.getKnownBits(CopyUMax);
EXPECT_EQ(64u, KnownUmax.getBitWidth());
@@ -1702,7 +1702,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsUMIN) {
Register CopyReg0 = Copies[Copies.size() - 1];
MachineInstr *FinalCopy0 = MRI->getVRegDef(CopyReg0);
Register SrcReg0 = FinalCopy0->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res0 = Info.getKnownBits(SrcReg0);
// Flips the range of operands: [0, 0xFFFFFFFF] <-> [0xFFFFFFFF, 0],
// uses umax and flips result back.
@@ -1736,7 +1736,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsSMAX) {
Register CopyReg0 = Copies[Copies.size() - 1];
MachineInstr *FinalCopy0 = MRI->getVRegDef(CopyReg0);
Register SrcReg0 = FinalCopy0->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res0 = Info.getKnownBits(SrcReg0);
// Flips the range of operands: [-0x80000000, 0x7FFFFFFF] <-> [0, 0xFFFFFFFF],
// uses umax and flips result back.
@@ -1771,7 +1771,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsSMIN) {
Register CopyReg0 = Copies[Copies.size() - 1];
MachineInstr *FinalCopy0 = MRI->getVRegDef(CopyReg0);
Register SrcReg0 = FinalCopy0->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res0 = Info.getKnownBits(SrcReg0);
// Flips the range of operands: [-0x80000000, 0x7FFFFFFF] <-> [0xFFFFFFFF, 0],
// uses umax and flips result back.
@@ -1805,7 +1805,7 @@ TEST_F(AArch64GISelMITest, TestInvalidQueries) {
MachineInstr *BiggerSizedCopy = MRI->getVRegDef(BiggerSizedCopyReg);
Register BiggerSizedShl = BiggerSizedCopy->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits EqSizeRes = Info.getKnownBits(EqSizedShl);
KnownBits BiggerSizeRes = Info.getKnownBits(BiggerSizedShl);
@@ -1844,7 +1844,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsAssertZext) {
Register CopyAssert63 = Copies[Copies.size() - 2];
Register CopyAssert3 = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
MachineInstr *Copy;
Register SrcReg;
KnownBits Res;
@@ -1906,7 +1906,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsCTPOP) {
Register FourCopy = Copies[Copies.size() - 2];
Register OneCopy = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
MachineInstr *Copy;
Register SrcReg;
KnownBits Res;
@@ -1977,7 +1977,7 @@ TEST_F(AMDGPUGISelMITest, TestKnownBitsUBFX) {
MachineInstr *CopyUnkWidthBfx = MRI->getVRegDef(CopyUnkWidthBfxReg);
Register UnkWidthSrcReg = CopyUnkWidthBfx->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res1 = Info.getKnownBits(SrcReg);
EXPECT_EQ(0u, Res1.One.getZExtValue());
@@ -2037,7 +2037,7 @@ TEST_F(AMDGPUGISelMITest, TestKnownBitsSBFX) {
MachineInstr *CopyUnkWidthBfx = MRI->getVRegDef(CopyUnkWidthBfxReg);
Register UnkWidthSrcReg = CopyUnkWidthBfx->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res1 = Info.getKnownBits(SrcReg);
EXPECT_EQ(0u, Res1.One.getZExtValue());
@@ -2086,7 +2086,7 @@ TEST_F(AMDGPUGISelMITest, TestNumSignBitsUBFX) {
Register CopyUnkOffBfxReg = Copies[Copies.size() - 2];
Register CopyUnkWidthBfxReg = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
EXPECT_EQ(24u, Info.computeNumSignBits(CopyUnkBfxReg));
EXPECT_EQ(29u, Info.computeNumSignBits(CopyPosBfxReg));
EXPECT_EQ(24u, Info.computeNumSignBits(CopyNegBfxReg));
@@ -2120,7 +2120,7 @@ TEST_F(AMDGPUGISelMITest, TestNumSignBitsSBFX) {
Register CopyUnkValBfxReg = Copies[Copies.size() - 2];
Register CopyUnkOffBfxReg = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
EXPECT_EQ(32u, Info.computeNumSignBits(CopyNegBfxReg));
EXPECT_EQ(29u, Info.computeNumSignBits(CopyPosBfxReg));
EXPECT_EQ(29u, Info.computeNumSignBits(CopyHiSetBfxReg));
@@ -2151,7 +2151,7 @@ TEST_F(AMDGPUGISelMITest, TestKnownBitsAssertAlign) {
setUp(MIRString);
if (!TM)
GTEST_SKIP();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res;
auto GetKB = [&](unsigned Idx) {
@@ -2191,7 +2191,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsUADDO) {
GTEST_SKIP();
Register CopyOverflow = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(CopyOverflow);
EXPECT_EQ(0u, Res.One.getZExtValue());
EXPECT_EQ(31u, Res.Zero.countl_one());
diff --git a/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp b/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp
index 2f3336e9085b6..d3a619337ae97 100644
--- a/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
#include "GISelMITest.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
TEST_F(AArch64GISelMITest, TestKnownBitsBuildVector) {
@@ -33,7 +33,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsBuildVector) {
Register CopyReg = Copies[Copies.size() - 1];
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
// BuildVector KnownBits takes common bits of all elements.
// 111??000
@@ -75,7 +75,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsVectorCstPHI) {
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
Register DstReg = FinalCopy->getOperand(0).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
EXPECT_EQ((uint64_t)2, Res.One.getZExtValue());
EXPECT_EQ((uint64_t)0xfc, Res.Zero.getZExtValue());
@@ -111,7 +111,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsVectorCstPHIToNonGenericReg) {
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
Register DstReg = FinalCopy->getOperand(0).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
EXPECT_EQ((uint64_t)0, Res.One.getZExtValue());
EXPECT_EQ((uint64_t)0, Res.Zero.getZExtValue());
@@ -147,7 +147,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsVectorUnknownPHI) {
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
Register DstReg = FinalCopy->getOperand(0).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
EXPECT_EQ((uint64_t)0, Res.One.getZExtValue());
EXPECT_EQ((uint64_t)0, Res.Zero.getZExtValue());
@@ -185,7 +185,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsVectorCstPHIWithLoop) {
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
Register DstReg = FinalCopy->getOperand(0).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
EXPECT_EQ((uint64_t)0, Res.One.getZExtValue());
EXPECT_EQ((uint64_t)0, Res.Zero.getZExtValue());
@@ -217,7 +217,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsVectorDecreasingCstPHIWithLoop) {
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
Register DstReg = FinalCopy->getOperand(0).getReg();
- GISelKnownBits Info(*MF, /*MaxDepth=*/24);
+ GISelValueTracking Info(*MF, /*MaxDepth=*/24);
KnownBits Res = Info.getKnownBits(SrcReg);
EXPECT_EQ((uint64_t)0, Res.One.getZExtValue());
EXPECT_EQ((uint64_t)0xC0, Res.Zero.getZExtValue());
@@ -254,7 +254,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsVectorAND) {
Register CopyReg = Copies[Copies.size() - 1];
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
EXPECT_EQ(0x08u, Res.One.getZExtValue());
EXPECT_EQ(0xC7u, Res.Zero.getZExtValue());
@@ -287,7 +287,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsVectorOR) {
Register CopyReg = Copies[Copies.size() - 1];
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
EXPECT_EQ(0x1Au, Res.One.getZExtValue());
EXPECT_EQ(0xC1u, Res.Zero.getZExtValue());
@@ -320,7 +320,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsVectorXOR) {
Register CopyReg = Copies[Copies.size() - 1];
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
EXPECT_EQ(0x02u, Res.One.getZExtValue());
EXPECT_EQ(0xC9u, Res.Zero.getZExtValue());
@@ -345,7 +345,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsVectorXORConstant) {
if (!TM)
GTEST_SKIP();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
Register CopySplatReg = Copies[Copies.size() - 2];
MachineInstr *FinalSplatCopy = MRI->getVRegDef(CopySplatReg);
Register SrcSplatReg = FinalSplatCopy->getOperand(1).getReg();
@@ -393,7 +393,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsVectorASHR) {
Register CopyReg0 = Copies[Copies.size() - 2];
MachineInstr *FinalCopy0 = MRI->getVRegDef(CopyReg0);
Register SrcReg0 = FinalCopy0->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res0 = Info.getKnownBits(SrcReg0);
EXPECT_EQ(0xF2u, Res0.One.getZExtValue());
EXPECT_EQ(0x04u, Res0.Zero.getZExtValue());
@@ -438,7 +438,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsVectorLSHR) {
Register CopyReg0 = Copies[Copies.size() - 2];
MachineInstr *FinalCopy0 = MRI->getVRegDef(CopyReg0);
Register SrcReg0 = FinalCopy0->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res0 = Info.getKnownBits(SrcReg0);
EXPECT_EQ(0x32u, Res0.One.getZExtValue());
EXPECT_EQ(0xC4u, Res0.Zero.getZExtValue());
@@ -474,7 +474,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsVectorSHL) {
Register CopyReg = Copies[Copies.size() - 1];
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
EXPECT_EQ(0x40u, Res.One.getZExtValue());
EXPECT_EQ(0x27u, Res.Zero.getZExtValue());
@@ -507,7 +507,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsVectorADD) {
Register CopyReg = Copies[Copies.size() - 1];
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
EXPECT_EQ(0x0091u, Res.One.getZExtValue());
EXPECT_EQ(0x8108u, Res.Zero.getZExtValue());
@@ -540,7 +540,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsVectorSUB) {
Register CopyReg = Copies[Copies.size() - 1];
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
EXPECT_EQ(0x01CDu, Res.One.getZExtValue());
EXPECT_EQ(0xC810u, Res.Zero.getZExtValue());
@@ -569,7 +569,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsVectorMUL) {
Register CopyReg = Copies[Copies.size() - 1];
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
EXPECT_EQ(0x0008u, Res.One.getZExtValue());
EXPECT_EQ(0xFE07u, Res.Zero.getZExtValue());
@@ -604,7 +604,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsVectorSelect) {
Register CopyReg = Copies[Copies.size() - 1];
MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
Register SrcReg = FinalCopy->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(SrcReg);
EXPECT_EQ(0x20u, Res.One.getZExtValue());
EXPECT_EQ(0x01u, Res.Zero.getZExtValue());
@@ -627,12 +627,12 @@ TEST_F(AArch64GISelMITest, TestVectorSignBitIsZero) {
auto NonSplat2 =
B.buildBuildVector(V2S32, {B.buildConstant(S32, 0x80000000).getReg(0),
B.buildConstant(S32, 0x80000004).getReg(0)});
- // signBitIsZero is true for elt 0 and false for elt 1 GISelKnownBits takes
- // common bits so this is false.
+ // signBitIsZero is true for elt 0 and false for elt 1 GISelValueTracking
+ // takes common bits so this is false.
auto NonSplat3 =
B.buildBuildVector(V2S32, {B.buildConstant(S32, 0x80000000).getReg(0),
B.buildConstant(S32, 0x8).getReg(0)});
- GISelKnownBits KnownBits(*MF);
+ GISelValueTracking KnownBits(*MF);
EXPECT_TRUE(KnownBits.signBitIsZero(Zero.getReg(0)));
EXPECT_FALSE(KnownBits.signBitIsZero(SignBit.getReg(0)));
@@ -682,7 +682,7 @@ TEST_F(AArch64GISelMITest, TestVectorNumSignBitsConstant) {
Register NonSplatSameSign = Copies[Copies.size() - 2];
Register NonSplatDifferentSign = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
// If it is known that all elts have same sign looks at common bits and
// effectively returns smallest NumSignBits of all the elts. Otherwise returns
// default value 1.
@@ -720,7 +720,7 @@ TEST_F(AArch64GISelMITest, TestVectorNumSignBitsSext) {
Register CopySextNeg1 = Copies[Copies.size() - 2];
Register CopySextNonSplat = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
EXPECT_EQ(25u, Info.computeNumSignBits(CopySextLoad));
EXPECT_EQ(32u, Info.computeNumSignBits(CopySextNeg1));
EXPECT_EQ(28u, Info.computeNumSignBits(CopySextNonSplat));
@@ -777,7 +777,7 @@ TEST_F(AArch64GISelMITest, TestVectorNumSignBitsSextInReg) {
Register CopyInReg9Sext = Copies[Copies.size() - 2];
Register CopyInReg31Sext = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
EXPECT_EQ(26u, Info.computeNumSignBits(CopyInReg7));
EXPECT_EQ(25u, Info.computeNumSignBits(CopyInReg8));
EXPECT_EQ(24u, Info.computeNumSignBits(CopyInReg9));
@@ -845,7 +845,7 @@ TEST_F(AArch64GISelMITest, TestNumSignBitsVectorAssertSext) {
Register CopyInReg9Sext = Copies[Copies.size() - 2];
Register CopyInReg31Sext = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
EXPECT_EQ(32u, Info.computeNumSignBits(CopyInReg1));
EXPECT_EQ(26u, Info.computeNumSignBits(CopyInReg7));
EXPECT_EQ(25u, Info.computeNumSignBits(CopyInReg8));
@@ -885,7 +885,7 @@ TEST_F(AArch64GISelMITest, TestVectorNumSignBitsTrunc) {
Register CopyTruncNeg1 = Copies[Copies.size() - 2];
Register CopyTrunc7 = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
EXPECT_EQ(1u, Info.computeNumSignBits(CopyTruncLoad));
EXPECT_EQ(8u, Info.computeNumSignBits(CopyTruncNeg1));
EXPECT_EQ(5u, Info.computeNumSignBits(CopyTrunc7));
@@ -944,7 +944,7 @@ TEST_F(AMDGPUGISelMITest, TestVectorIsKnownToBeAPowerOfTwo) {
if (!TM)
GTEST_SKIP();
- GISelKnownBits KB(*MF);
+ GISelValueTracking VT(*MF);
Register CopyZero = Copies[Copies.size() - 12];
Register CopyOne = Copies[Copies.size() - 11];
@@ -961,21 +961,21 @@ TEST_F(AMDGPUGISelMITest, TestVectorIsKnownToBeAPowerOfTwo) {
Register CopyLShrSignMask = Copies[Copies.size() - 2];
Register CopyOrPow2 = Copies[Copies.size() - 1];
- EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyZero, *MRI, &KB));
- EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyOne, *MRI, &KB));
- EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyTwo, *MRI, &KB));
- EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyThree, *MRI, &KB));
+ EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyZero, *MRI, &VT));
+ EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyOne, *MRI, &VT));
+ EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyTwo, *MRI, &VT));
+ EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyThree, *MRI, &VT));
- EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyTruncTwo, *MRI, &KB));
- EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyTruncThree, *MRI, &KB));
- EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyTruncFive, *MRI, &KB));
+ EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyTruncTwo, *MRI, &VT));
+ EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyTruncThree, *MRI, &VT));
+ EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyTruncFive, *MRI, &VT));
// TODO: check for vector(splat) shift amount.
- EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyShl1, *MRI, &KB));
- EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyShl2, *MRI, &KB));
+ EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyShl1, *MRI, &VT));
+ EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyShl2, *MRI, &VT));
- EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyLShrNotSignMask, *MRI, &KB));
- EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyLShrSignMask, *MRI, &KB));
- EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyOrPow2, *MRI, &KB));
+ EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyLShrNotSignMask, *MRI, &VT));
+ EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyLShrSignMask, *MRI, &VT));
+ EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyOrPow2, *MRI, &VT));
}
TEST_F(AArch64GISelMITest, TestVectorMetadata) {
@@ -1014,7 +1014,7 @@ TEST_F(AArch64GISelMITest, TestVectorMetadata) {
MIB.buildLoad(Load->getOperand(0), Load->getOperand(1), NewMMO);
Load->eraseFromParent();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res = Info.getKnownBits(And->getOperand(1).getReg());
EXPECT_TRUE(Res.One.isZero());
@@ -1045,7 +1045,7 @@ TEST_F(AArch64GISelMITest, TestVectorKnownBitsExt) {
Register CopyRegZ = Copies[Copies.size() - 2];
Register CopyRegS = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
MachineInstr *Copy;
Register SrcReg;
KnownBits Res;
@@ -1114,7 +1114,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsVectorSextInReg) {
setUp(MIRString);
if (!TM)
GTEST_SKIP();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res;
auto GetKB = [&](unsigned Idx) {
Register CopyReg = Copies[Idx];
@@ -1182,7 +1182,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsVectorAssertSext) {
setUp(MIRString);
if (!TM)
GTEST_SKIP();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res;
auto GetKB = [&](unsigned Idx) {
Register CopyReg = Copies[Idx];
@@ -1234,7 +1234,7 @@ TEST_F(AArch64GISelMITest, TestVectorKnownBitsBSwapBitReverse) {
Register CopyBSwap = Copies[Copies.size() - 2];
Register CopyBitReverse = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits BSwapKnown = Info.getKnownBits(CopyBSwap);
EXPECT_EQ(32u, BSwapKnown.getBitWidth());
@@ -1289,7 +1289,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsVectorUMAX) {
Register CopyReg0 = Copies[Copies.size() - 2];
MachineInstr *FinalCopy0 = MRI->getVRegDef(CopyReg0);
Register SrcReg0 = FinalCopy0->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res0 = Info.getKnownBits(SrcReg0);
EXPECT_EQ(0x0Cu, Res0.One.getZExtValue());
EXPECT_EQ(0xF0u, Res0.Zero.getZExtValue());
@@ -1317,7 +1317,7 @@ TEST_F(AArch64GISelMITest, TestVectorKnownBitsUMax) {
GTEST_SKIP();
Register CopyUMax = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits KnownUmax = Info.getKnownBits(CopyUMax);
EXPECT_EQ(64u, KnownUmax.getBitWidth());
@@ -1355,7 +1355,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsVectorUMIN) {
Register CopyReg0 = Copies[Copies.size() - 1];
MachineInstr *FinalCopy0 = MRI->getVRegDef(CopyReg0);
Register SrcReg0 = FinalCopy0->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res0 = Info.getKnownBits(SrcReg0);
EXPECT_EQ(0x01u, Res0.One.getZExtValue());
EXPECT_EQ(0xF4u, Res0.Zero.getZExtValue());
@@ -1388,7 +1388,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsVectorSMAX) {
Register CopyReg0 = Copies[Copies.size() - 1];
MachineInstr *FinalCopy0 = MRI->getVRegDef(CopyReg0);
Register SrcReg0 = FinalCopy0->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res0 = Info.getKnownBits(SrcReg0);
EXPECT_EQ(0x40u, Res0.One.getZExtValue());
EXPECT_EQ(0x3Fu, Res0.Zero.getZExtValue());
@@ -1421,7 +1421,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsVectorSMIN) {
Register CopyReg0 = Copies[Copies.size() - 1];
MachineInstr *FinalCopy0 = MRI->getVRegDef(CopyReg0);
Register SrcReg0 = FinalCopy0->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits Res0 = Info.getKnownBits(SrcReg0);
EXPECT_EQ(0x80u, Res0.One.getZExtValue());
EXPECT_EQ(0x7Eu, Res0.Zero.getZExtValue());
@@ -1451,7 +1451,7 @@ TEST_F(AArch64GISelMITest, TestVectorInvalidQueries) {
MachineInstr *BiggerSizedCopy = MRI->getVRegDef(BiggerSizedCopyReg);
Register BiggerSizedShl = BiggerSizedCopy->getOperand(1).getReg();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
KnownBits EqSizeRes = Info.getKnownBits(EqSizedShl);
KnownBits BiggerSizeRes = Info.getKnownBits(BiggerSizedShl);
@@ -1490,7 +1490,7 @@ TEST_F(AArch64GISelMITest, TestKnownBitsVectorAssertZext) {
Register CopyAssert63 = Copies[Copies.size() - 2];
Register CopyAssert3 = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
MachineInstr *Copy;
Register SrcReg;
KnownBits Res;
@@ -1543,7 +1543,7 @@ TEST_F(AArch64GISelMITest, TestNumSignBitsUAddoOverflow) {
Register CopyOverflow = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
// Assert sign-extension from vector boolean
EXPECT_EQ(32u, Info.computeNumSignBits(CopyOverflow));
@@ -1565,7 +1565,7 @@ TEST_F(AArch64GISelMITest, TestKnwonBitsUnmergeVectorScalar) {
Register CopyOverflow = Copies[Copies.size() - 1];
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
EXPECT_EQ(0xFFF0u, Info.getKnownBits(CopyOverflow).Zero.getZExtValue());
}
@@ -1586,7 +1586,7 @@ TEST_F(AArch64GISelMITest, TestKnwonBitsUnmergeVectorVector) {
if (!TM)
GTEST_SKIP();
- GISelKnownBits Info(*MF);
+ GISelValueTracking Info(*MF);
Register CopyOverflow1 = Copies[Copies.size() - 2];
EXPECT_EQ(0xF0u, Info.getKnownBits(CopyOverflow1).Zero.getZExtValue());
diff --git a/llvm/unittests/CodeGen/GlobalISel/LegalizerTest.cpp b/llvm/unittests/CodeGen/GlobalISel/LegalizerTest.cpp
index 625e2c92b1119..7928c91a3b28c 100644
--- a/llvm/unittests/CodeGen/GlobalISel/LegalizerTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/LegalizerTest.cpp
@@ -8,7 +8,7 @@
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "GISelMITest.h"
-#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/LostDebugLocObserver.h"
#define DEBUG_TYPE "legalizer-test"
@@ -66,10 +66,10 @@ TEST_F(AArch64GISelMITest, BasicLegalizerTest) {
ALegalizerInfo LI(MF->getSubtarget());
LostDebugLocObserver LocObserver(DEBUG_TYPE);
- GISelKnownBits KB(*MF);
+ GISelValueTracking VT(*MF);
Legalizer::MFResult Result = Legalizer::legalizeMachineFunction(
- *MF, LI, {&LocObserver}, LocObserver, B, &KB);
+ *MF, LI, {&LocObserver}, LocObserver, B, &VT);
EXPECT_TRUE(isNullMIPtr(Result.FailedOn));
EXPECT_TRUE(Result.Changed);
@@ -104,7 +104,7 @@ TEST_F(AArch64GISelMITest, UnorderedArtifactCombiningTest) {
ALegalizerInfo LI(MF->getSubtarget());
LostDebugLocObserver LocObserver(DEBUG_TYPE);
- GISelKnownBits KB(*MF);
+ GISelValueTracking VT(*MF);
// The events here unfold as follows:
// 1. First, the function is scanned pre-forming the worklist of artifacts:
@@ -161,7 +161,7 @@ TEST_F(AArch64GISelMITest, UnorderedArtifactCombiningTest) {
// the process follows def-use chains, making them shorter at each step, thus
// combining everything that can be combined in O(n) time.
Legalizer::MFResult Result = Legalizer::legalizeMachineFunction(
- *MF, LI, {&LocObserver}, LocObserver, B, &KB);
+ *MF, LI, {&LocObserver}, LocObserver, B, &VT);
EXPECT_TRUE(isNullMIPtr(Result.FailedOn));
EXPECT_TRUE(Result.Changed);
@@ -198,10 +198,10 @@ TEST_F(AArch64GISelMITest, UnorderedArtifactCombiningManyCopiesTest) {
ALegalizerInfo LI(MF->getSubtarget());
LostDebugLocObserver LocObserver(DEBUG_TYPE);
- GISelKnownBits KB(*MF);
+ GISelValueTracking VT(*MF);
Legalizer::MFResult Result = Legalizer::legalizeMachineFunction(
- *MF, LI, {&LocObserver}, LocObserver, B, &KB);
+ *MF, LI, {&LocObserver}, LocObserver, B, &VT);
EXPECT_TRUE(isNullMIPtr(Result.FailedOn));
EXPECT_TRUE(Result.Changed);
diff --git a/llvm/utils/gn/secondary/llvm/lib/CodeGen/GlobalISel/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/CodeGen/GlobalISel/BUILD.gn
index dc9e449195159..430e175491b07 100644
--- a/llvm/utils/gn/secondary/llvm/lib/CodeGen/GlobalISel/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/CodeGen/GlobalISel/BUILD.gn
@@ -23,7 +23,7 @@ static_library("GlobalISel") {
"CombinerHelperVectorOps.cpp",
"GIMatchTableExecutor.cpp",
"GISelChangeObserver.cpp",
- "GISelKnownBits.cpp",
+ "GISelValueTracking.cpp",
"GlobalISel.cpp",
"IRTranslator.cpp",
"InlineAsmLowering.cpp",
More information about the llvm-commits
mailing list