[llvm] 5e0efc0 - Reland "[GlobalISel][LLT] Introduce FPInfo for LLT (Enable bfloat, ppc128float and others in GlobalISel) (#155107)" (#188502)

via llvm-commits llvm-commits at lists.llvm.org
Sat Apr 4 05:57:18 PDT 2026


Author: Alan Li
Date: 2026-04-04T05:57:13-07:00
New Revision: 5e0efc0f1d79f6d0050efa0d21a47dd95ace5ddf

URL: https://github.com/llvm/llvm-project/commit/5e0efc0f1d79f6d0050efa0d21a47dd95ace5ddf
DIFF: https://github.com/llvm/llvm-project/commit/5e0efc0f1d79f6d0050efa0d21a47dd95ace5ddf.diff

LOG: Reland "[GlobalISel][LLT] Introduce FPInfo for LLT (Enable bfloat, ppc128float and others in GlobalISel) (#155107)" (#188502)

This is a reland of https://github.com/llvm/llvm-project/pull/155107
along with a fix for old GCC builds.

The patch was reverted in
https://github.com/llvm/llvm-project/pull/188344 due to the compilation
failures described in
https://github.com/llvm/llvm-project/pull/155107#issuecomment-4121292756

The fix for old GCC builds is to remove the `constexpr` modifiers from the
original patch; this was done in 0721d8e7768c011b8cf2d4d223ca6eca3392b1f9.

Added: 
    llvm/unittests/CodeGen/GlobalISel/IRTranslatorBF16Test.cpp

Modified: 
    llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
    llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
    llvm/include/llvm/CodeGen/LowLevelTypeUtils.h
    llvm/include/llvm/CodeGen/TargetLowering.h
    llvm/include/llvm/CodeGenTypes/LowLevelType.h
    llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
    llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
    llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
    llvm/lib/CodeGen/GlobalISel/LegalizeMutations.cpp
    llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
    llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
    llvm/lib/CodeGen/GlobalISel/Utils.cpp
    llvm/lib/CodeGen/LowLevelTypeUtils.cpp
    llvm/lib/CodeGen/MIRParser/MIParser.cpp
    llvm/lib/CodeGen/MachineVerifier.cpp
    llvm/lib/CodeGenTypes/LowLevelType.cpp
    llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp
    llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-subvector.ll
    llvm/test/CodeGen/AArch64/GlobalISel/pr168872.ll
    llvm/test/CodeGen/AArch64/GlobalISel/translate-gep.ll
    llvm/test/CodeGen/AArch64/GlobalISel/translate-sve-formal-argument-multiple.ll
    llvm/test/CodeGen/AArch64/GlobalISel/translate-sve-formal-argument.ll
    llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid0.mir
    llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid1.mir
    llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid2.mir
    llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid3.mir
    llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err0.mir
    llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err1.mir
    llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err10.mir
    llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err11.mir
    llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err12.mir
    llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err13.mir
    llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err14.mir
    llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err15.mir
    llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err2.mir
    llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err3.mir
    llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err4.mir
    llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err5.mir
    llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err6.mir
    llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err7.mir
    llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err8.mir
    llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err9.mir
    llvm/test/CodeGen/MIR/WebAssembly/typed-immediate-operand-invalid0.mir
    llvm/test/CodeGen/MIR/WebAssembly/typed-immediate-operand-invalid1.mir
    llvm/test/TableGen/GlobalISelEmitter/GlobalISelEmitter.td
    llvm/test/TableGen/GlobalISelEmitter/HwModes.td
    llvm/unittests/CodeGen/GlobalISel/CMakeLists.txt
    llvm/unittests/CodeGen/LowLevelTypeTest.cpp
    llvm/utils/TableGen/Common/CMakeLists.txt
    llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp
    llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTableExecutorEmitter.cpp
    llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTableExecutorEmitter.h

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
index 6d916b76cee55..7815ad686cbaa 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -650,6 +650,8 @@ class IRTranslator : public MachineFunctionPass {
 
   StackProtectorDescriptor SPDescriptor;
 
+  bool mayTranslateUserTypes(const User &U) const;
+
   /// Switch analysis and optimization.
   class GISelSwitchLowering : public SwitchCG::SwitchLowering {
   public:

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
index 9afccf86576f2..8d27f85c04b87 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
@@ -402,6 +402,11 @@ LLVM_ABI LegalizeMutation changeElementCountTo(unsigned TypeIdx,
 LLVM_ABI LegalizeMutation changeElementSizeTo(unsigned TypeIdx,
                                               unsigned FromTypeIdx);
 
+/// Change the scalar size or element size to have the same scalar size as the
+/// type \p NewTy. Unlike changeElementTo, this discards pointer types and only
+/// changes the size.
+LLVM_ABI LegalizeMutation changeElementSizeTo(unsigned TypeIdx, LLT NewTy);
+
 /// Widen the scalar type or vector element type for the given type index to the
 /// next power of 2.
 LLVM_ABI LegalizeMutation widenScalarOrEltToNextPow2(unsigned TypeIdx,
@@ -1048,7 +1053,7 @@ class LegalizeRuleSet {
     using namespace LegalizeMutations;
     return actionIf(LegalizeAction::WidenScalar,
                     scalarOrEltNarrowerThan(TypeIdx, Ty.getScalarSizeInBits()),
-                    changeElementTo(typeIdx(TypeIdx), Ty));
+                    changeElementSizeTo(typeIdx(TypeIdx), Ty));
   }
 
   /// Ensure the scalar or element is at least as wide as Ty.
@@ -1059,7 +1064,7 @@ class LegalizeRuleSet {
     return actionIf(LegalizeAction::WidenScalar,
                     all(Predicate, scalarOrEltNarrowerThan(
                                        TypeIdx, Ty.getScalarSizeInBits())),
-                    changeElementTo(typeIdx(TypeIdx), Ty));
+                    changeElementSizeTo(typeIdx(TypeIdx), Ty));
   }
 
   /// Ensure the vector size is at least as wide as VectorSize by promoting the
@@ -1078,7 +1083,8 @@ class LegalizeRuleSet {
           const LLT VecTy = Query.Types[TypeIdx];
           unsigned NumElts = VecTy.getNumElements();
           unsigned MinSize = VectorSize / NumElts;
-          LLT NewTy = LLT::fixed_vector(NumElts, LLT::scalar(MinSize));
+          LLT NewTy = LLT::fixed_vector(
+              NumElts, VecTy.getElementType().changeElementSize(MinSize));
           return std::make_pair(TypeIdx, NewTy);
         });
   }
@@ -1089,7 +1095,7 @@ class LegalizeRuleSet {
     using namespace LegalizeMutations;
     return actionIf(LegalizeAction::WidenScalar,
                     scalarNarrowerThan(TypeIdx, Ty.getSizeInBits()),
-                    changeTo(typeIdx(TypeIdx), Ty));
+                    changeElementSizeTo(typeIdx(TypeIdx), Ty));
   }
   LegalizeRuleSet &minScalar(bool Pred, unsigned TypeIdx, const LLT Ty) {
     if (!Pred)
@@ -1110,7 +1116,7 @@ class LegalizeRuleSet {
                  QueryTy.getSizeInBits() < Ty.getSizeInBits() &&
                  Predicate(Query);
         },
-        changeTo(typeIdx(TypeIdx), Ty));
+        changeElementSizeTo(typeIdx(TypeIdx), Ty));
   }
 
   /// Ensure the scalar is at most as wide as Ty.
@@ -1119,7 +1125,7 @@ class LegalizeRuleSet {
     using namespace LegalizeMutations;
     return actionIf(LegalizeAction::NarrowScalar,
                     scalarOrEltWiderThan(TypeIdx, Ty.getScalarSizeInBits()),
-                    changeElementTo(typeIdx(TypeIdx), Ty));
+                    changeElementSizeTo(typeIdx(TypeIdx), Ty));
   }
 
   /// Ensure the scalar is at most as wide as Ty.
@@ -1128,7 +1134,7 @@ class LegalizeRuleSet {
     using namespace LegalizeMutations;
     return actionIf(LegalizeAction::NarrowScalar,
                     scalarWiderThan(TypeIdx, Ty.getSizeInBits()),
-                    changeTo(typeIdx(TypeIdx), Ty));
+                    changeElementSizeTo(typeIdx(TypeIdx), Ty));
   }
 
   /// Conditionally limit the maximum size of the scalar.
@@ -1146,7 +1152,7 @@ class LegalizeRuleSet {
                  QueryTy.getSizeInBits() > Ty.getSizeInBits() &&
                  Predicate(Query);
         },
-        changeElementTo(typeIdx(TypeIdx), Ty));
+        changeElementSizeTo(typeIdx(TypeIdx), Ty));
   }
 
   /// Limit the range of scalar sizes to MinTy and MaxTy.
@@ -1211,9 +1217,8 @@ class LegalizeRuleSet {
                  Predicate(Query);
         },
         [=](const LegalityQuery &Query) {
-          LLT T = Query.Types[LargeTypeIdx];
-          if (T.isPointerVector())
-            T = T.changeElementType(LLT::scalar(T.getScalarSizeInBits()));
+          LLT T = Query.Types[TypeIdx].changeElementSize(
+              Query.Types[LargeTypeIdx].getScalarSizeInBits());
           return std::make_pair(TypeIdx, T);
         });
   }
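
A minimal sketch of the effect (assumed client code, not part of this diff):
the rules above now route through changeElementSizeTo, which copies only the
scalar size from the target type and keeps the query type's own kind, so with
extended LLTs enabled:

    LLT::setUseExtended(true);
    LLT I16 = LLT::integer(16);
    assert(I16.changeElementSize(32) == LLT::integer(32)); // stays INTEGER
    LLT F16 = LLT::float16();
    assert(F16.changeElementSize(32) == LLT::float32());   // IEEE kind kept

so a rule like minScalar(0, LLT::integer(32)) widens an integer s16 to an
integer s32 rather than to a kind-less s32.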

diff --git a/llvm/include/llvm/CodeGen/LowLevelTypeUtils.h b/llvm/include/llvm/CodeGen/LowLevelTypeUtils.h
index 51a298eb8b247..f0c1758a4c9cc 100644
--- a/llvm/include/llvm/CodeGen/LowLevelTypeUtils.h
+++ b/llvm/include/llvm/CodeGen/LowLevelTypeUtils.h
@@ -41,6 +41,6 @@ LLVM_ABI LLT getLLTForMVT(MVT Ty);
 /// Get the appropriate floating point arithmetic semantic based on the bit size
 /// of the given scalar LLT.
 LLVM_ABI const llvm::fltSemantics &getFltSemanticForLLT(LLT Ty);
-}
+} // namespace llvm
 
 #endif // LLVM_CODEGEN_LOWLEVELTYPEUTILS_H

diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index fbed0d5378db4..59a0f2d2e0c2a 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -443,7 +443,7 @@ class LLVM_ABI TargetLoweringBase {
   /// G_INSERT_VECTOR_ELT, G_EXTRACT_VECTOR_ELT,
   /// G_INSERT_SUBVECTOR, and G_EXTRACT_SUBVECTOR
   LLT getVectorIdxLLT(const DataLayout &DL) const {
-    return LLT::scalar(getVectorIdxWidth(DL));
+    return LLT::integer(getVectorIdxWidth(DL));
   }
 
   /// Returns the type to be used for the EVL/AVL operand of VP nodes:

diff --git a/llvm/include/llvm/CodeGenTypes/LowLevelType.h b/llvm/include/llvm/CodeGenTypes/LowLevelType.h
index 92def9209d47a..920bd9ad9bf51 100644
--- a/llvm/include/llvm/CodeGenTypes/LowLevelType.h
+++ b/llvm/include/llvm/CodeGenTypes/LowLevelType.h
@@ -9,27 +9,32 @@
 /// Implement a low-level type suitable for MachineInstr level instruction
 /// selection.
 ///
-/// For a type attached to a MachineInstr, we only care about 2 details: total
-/// size and the number of vector lanes (if any). Accordingly, there are 4
-/// possible valid type-kinds:
+/// For a type attached to a MachineInstr, we care about the total
+/// size, the number of vector lanes (if any),
+/// and the kind of the type (anyscalar, integer, float, etc.).
+/// Floating-point types additionally carry an APFloat::Semantics to make
+/// them distinguishable.
 ///
-///    * `sN` for scalars and aggregates
-///    * `<N x sM>` for vectors, which must have at least 2 elements.
-///    * `pN` for pointers
+/// Previously, other information required for correct selection was expected
+/// to be carried only by the opcode or non-type flags, for example the
+/// distinction between G_ADD and G_FADD for int/float, or fast-math flags.
 ///
-/// Other information required for correct selection is expected to be carried
-/// by the opcode, or non-type flags. For example the distinction between G_ADD
-/// and G_FADD for int/float or fast-math flags.
+/// Now we are also able to rely on the kind of the type.
+/// This may be useful to distinguish different types of the same size used by
+/// the same opcode, for example G_FADD with half vs. G_FADD with bfloat16.
 ///
 //===----------------------------------------------------------------------===//
 
 #ifndef LLVM_CODEGEN_LOWLEVELTYPE_H
 #define LLVM_CODEGEN_LOWLEVELTYPE_H
 
+#include "llvm/ADT/APFloat.h"
 #include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/bit.h"
 #include "llvm/CodeGenTypes/MachineValueType.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
 #include <cassert>
 
 namespace llvm {
@@ -39,68 +44,166 @@ class raw_ostream;
 
 class LLT {
 public:
+  using FpSemantics = APFloat::Semantics;
+
+  enum class Kind : uint8_t {
+    INVALID,
+    ANY_SCALAR,
+    INTEGER,
+    FLOAT,
+    POINTER,
+    VECTOR_ANY,
+    VECTOR_INTEGER,
+    VECTOR_FLOAT,
+    VECTOR_POINTER,
+  };
+
+  constexpr static Kind toVector(Kind Ty) {
+    if (Ty == Kind::POINTER)
+      return Kind::VECTOR_POINTER;
+
+    if (Ty == Kind::INTEGER)
+      return Kind::VECTOR_INTEGER;
+
+    if (Ty == Kind::FLOAT)
+      return Kind::VECTOR_FLOAT;
+
+    return Kind::VECTOR_ANY;
+  }
+
+  constexpr static Kind toScalar(Kind Ty) {
+    if (Ty == Kind::VECTOR_POINTER)
+      return Kind::POINTER;
+
+    if (Ty == Kind::VECTOR_INTEGER)
+      return Kind::INTEGER;
+
+    if (Ty == Kind::VECTOR_FLOAT)
+      return Kind::FLOAT;
+
+    return Kind::ANY_SCALAR;
+  }
+
   /// Get a low-level scalar or aggregate "bag of bits".
   static constexpr LLT scalar(unsigned SizeInBits) {
-    return LLT{/*isPointer=*/false, /*isVector=*/false, /*isScalar=*/true,
-               ElementCount::getFixed(0), SizeInBits,
-               /*AddressSpace=*/0};
+    return LLT{Kind::ANY_SCALAR, ElementCount::getFixed(0), SizeInBits};
+  }
+
+  static LLT integer(unsigned SizeInBits) {
+    if (!getUseExtended())
+      return LLT::scalar(SizeInBits);
+
+    return LLT{Kind::INTEGER, ElementCount::getFixed(0), SizeInBits};
+  }
+
+  static LLT floatingPoint(const FpSemantics &Sem) {
+    if (!getUseExtended())
+      return LLT::scalar(
+          APFloat::getSizeInBits(APFloatBase::EnumToSemantics(Sem)));
+
+    return LLT{Kind::FLOAT, ElementCount::getFixed(0),
+               APFloat::getSizeInBits(APFloatBase::EnumToSemantics(Sem)), Sem};
   }
 
   /// Get a low-level token; just a scalar with zero bits (or no size).
   static constexpr LLT token() {
-    return LLT{/*isPointer=*/false, /*isVector=*/false,
-               /*isScalar=*/true,   ElementCount::getFixed(0),
-               /*SizeInBits=*/0,
-               /*AddressSpace=*/0};
+    return LLT{Kind::ANY_SCALAR, ElementCount::getFixed(0),
+               /*SizeInBits=*/0};
   }
 
   /// Get a low-level pointer in the given address space.
   static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits) {
     assert(SizeInBits > 0 && "invalid pointer size");
-    return LLT{/*isPointer=*/true, /*isVector=*/false, /*isScalar=*/false,
-               ElementCount::getFixed(0), SizeInBits, AddressSpace};
+    return LLT{Kind::POINTER, ElementCount::getFixed(0), SizeInBits,
+               AddressSpace};
   }
 
   /// Get a low-level vector of some number of elements and element width.
   static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits) {
     assert(!EC.isScalar() && "invalid number of vector elements");
-    return LLT{/*isPointer=*/false, /*isVector=*/true, /*isScalar=*/false,
-               EC, ScalarSizeInBits, /*AddressSpace=*/0};
+    return LLT{Kind::VECTOR_ANY, EC, ScalarSizeInBits};
   }
 
   /// Get a low-level vector of some number of elements and element type.
   static constexpr LLT vector(ElementCount EC, LLT ScalarTy) {
     assert(!EC.isScalar() && "invalid number of vector elements");
     assert(!ScalarTy.isVector() && "invalid vector element type");
-    return LLT{ScalarTy.isPointer(),
-               /*isVector=*/true,
-               /*isScalar=*/false,
-               EC,
-               ScalarTy.getSizeInBits().getFixedValue(),
-               ScalarTy.isPointer() ? ScalarTy.getAddressSpace() : 0};
+
+    Kind Info = toVector(ScalarTy.Info);
+    if (ScalarTy.isPointer())
+      return LLT{Info, EC, ScalarTy.getSizeInBits().getFixedValue(),
+                 ScalarTy.getAddressSpace()};
+    if (ScalarTy.isFloat())
+      return LLT{Info, EC, ScalarTy.getSizeInBits().getFixedValue(),
+                 ScalarTy.getFpSemantics()};
+
+    return LLT{Info, EC, ScalarTy.getSizeInBits().getFixedValue()};
+  }
+
+  // FIXME: Remove this builder
+  static LLT floatIEEE(unsigned SizeInBits) {
+    if (!getUseExtended())
+      return LLT::scalar(SizeInBits);
+
+    switch (SizeInBits) {
+    default:
+      llvm_unreachable("Wrong SizeInBits for IEEE Floating point!");
+    case 16:
+      return float16();
+    case 32:
+      return float32();
+    case 64:
+      return float64();
+    case 128:
+      return float128();
+    }
   }
 
+  /// Get a bfloat16 value.
+  static constexpr LLT bfloat16() {
+    return LLT{Kind::FLOAT, ElementCount::getFixed(0), 16,
+               FpSemantics::S_BFloat};
+  }
   /// Get a 16-bit IEEE half value.
-  /// TODO: Add IEEE semantics to type - This currently returns a simple `scalar(16)`.
   static constexpr LLT float16() {
-    return scalar(16);
+    return LLT{Kind::FLOAT, ElementCount::getFixed(0), 16,
+               FpSemantics::S_IEEEhalf};
   }
-
   /// Get a 32-bit IEEE float value.
   static constexpr LLT float32() {
-    return scalar(32);
+    return LLT{Kind::FLOAT, ElementCount::getFixed(0), 32,
+               FpSemantics::S_IEEEsingle};
   }
-
   /// Get a 64-bit IEEE double value.
   static constexpr LLT float64() {
-    return scalar(64);
+    return LLT{Kind::FLOAT, ElementCount::getFixed(0), 64,
+               FpSemantics::S_IEEEdouble};
+  }
+
+  /// Get an 80-bit X86 floating point value.
+  static constexpr LLT x86fp80() {
+    return LLT{Kind::FLOAT, ElementCount::getFixed(0), 80,
+               FpSemantics::S_x87DoubleExtended};
+  }
+
+  /// Get a 128-bit IEEE quad value.
+  static constexpr LLT float128() {
+    return LLT{Kind::FLOAT, ElementCount::getFixed(0), 128,
+               FpSemantics::S_IEEEquad};
+  }
+
+  /// Get a 128-bit PowerPC double-double value.
+  static constexpr LLT ppcf128() {
+    return LLT{Kind::FLOAT, ElementCount::getFixed(0), 128,
+               FpSemantics::S_PPCDoubleDouble};
   }
 
   /// Get a low-level fixed-width vector of some number of elements and element
   /// width.
   static constexpr LLT fixed_vector(unsigned NumElements,
                                     unsigned ScalarSizeInBits) {
-    return vector(ElementCount::getFixed(NumElements), ScalarSizeInBits);
+    return vector(ElementCount::getFixed(NumElements),
+                  LLT::scalar(ScalarSizeInBits));
   }
 
   /// Get a low-level fixed-width vector of some number of elements and element
@@ -113,7 +216,8 @@ class LLT {
   /// width.
   static constexpr LLT scalable_vector(unsigned MinNumElements,
                                        unsigned ScalarSizeInBits) {
-    return vector(ElementCount::getScalable(MinNumElements), ScalarSizeInBits);
+    return vector(ElementCount::getScalable(MinNumElements),
+                  LLT::scalar(ScalarSizeInBits));
   }
 
   /// Get a low-level scalable vector of some number of elements and element
@@ -132,27 +236,81 @@ class LLT {
     return scalarOrVector(EC, LLT::scalar(static_cast<unsigned>(ScalarSize)));
   }
 
-  explicit constexpr LLT(bool isPointer, bool isVector, bool isScalar,
-                         ElementCount EC, uint64_t SizeInBits,
+  explicit constexpr LLT(Kind Info, ElementCount EC, uint64_t SizeInBits)
+      : LLT() {
+    init(Info, EC, SizeInBits);
+  }
+
+  explicit constexpr LLT(Kind Info, ElementCount EC, uint64_t SizeInBits,
                          unsigned AddressSpace)
       : LLT() {
-    init(isPointer, isVector, isScalar, EC, SizeInBits, AddressSpace);
+    init(Info, EC, SizeInBits, AddressSpace);
+  }
+
+  explicit constexpr LLT(Kind Info, ElementCount EC, uint64_t SizeInBits,
+                         FpSemantics Sem)
+      : LLT() {
+    init(Info, EC, SizeInBits, Sem);
   }
-  explicit constexpr LLT()
-      : IsScalar(false), IsPointer(false), IsVector(false), RawData(0) {}
 
   LLVM_ABI explicit LLT(MVT VT);
+  explicit constexpr LLT() : RawData(0), Info(static_cast<Kind>(0)) {}
 
-  constexpr bool isValid() const { return IsScalar || RawData != 0; }
-  constexpr bool isScalar() const { return IsScalar; }
-  constexpr bool isToken() const { return IsScalar && RawData == 0; };
-  constexpr bool isVector() const { return isValid() && IsVector; }
-  constexpr bool isPointer() const {
-    return isValid() && IsPointer && !IsVector;
+  constexpr bool isToken() const {
+    return Info == Kind::ANY_SCALAR && RawData == 0;
+  }
+  constexpr bool isValid() const { return isToken() || RawData != 0; }
+  constexpr bool isAnyScalar() const { return Info == Kind::ANY_SCALAR; }
+  constexpr bool isInteger() const { return Info == Kind::INTEGER; }
+  constexpr bool isFloat() const { return Info == Kind::FLOAT; }
+  constexpr bool isPointer() const { return Info == Kind::POINTER; }
+  constexpr bool isAnyVector() const { return Info == Kind::VECTOR_ANY; }
+  constexpr bool isIntegerVector() const {
+    return Info == Kind::VECTOR_INTEGER;
+  }
+  constexpr bool isFloatVector() const { return Info == Kind::VECTOR_FLOAT; }
+  constexpr bool isPointerVector() const {
+    return Info == Kind::VECTOR_POINTER;
   }
-  constexpr bool isPointerVector() const { return IsPointer && isVector(); }
   constexpr bool isPointerOrPointerVector() const {
-    return IsPointer && isValid();
+    return isPointer() || isPointerVector();
+  }
+
+  constexpr bool isScalar() const {
+    return Info == Kind::ANY_SCALAR || Info == Kind::INTEGER ||
+           Info == Kind::FLOAT;
+  }
+  constexpr bool isScalar(unsigned Size) const {
+    return isScalar() && getScalarSizeInBits() == Size;
+  }
+  constexpr bool isVector() const {
+    return Info == Kind::VECTOR_ANY || Info == Kind::VECTOR_INTEGER ||
+           Info == Kind::VECTOR_FLOAT || Info == Kind::VECTOR_POINTER;
+  }
+
+  constexpr bool isInteger(unsigned Size) const {
+    return isInteger() && getScalarSizeInBits() == Size;
+  }
+
+  constexpr bool isFloat(unsigned Size) const {
+    return isFloat() && getScalarSizeInBits() == Size;
+  }
+  constexpr bool isFloat(FpSemantics Sem) const {
+    return isFloat() && getFpSemantics() == Sem;
+  }
+  // FIXME: Remove or rework this predicate
+  constexpr bool isFloatIEEE() const {
+    return isFloat(APFloatBase::S_IEEEhalf) ||
+           isFloat(APFloatBase::S_IEEEsingle) ||
+           isFloat(APFloatBase::S_IEEEdouble) ||
+           isFloat(APFloatBase::S_IEEEquad);
+  }
+  constexpr bool isBFloat16() const { return isFloat(FpSemantics::S_BFloat); }
+  constexpr bool isX86FP80() const {
+    return isFloat(FpSemantics::S_x87DoubleExtended);
+  }
+  constexpr bool isPPCF128() const {
+    return isFloat(FpSemantics::S_PPCDoubleDouble);
   }
 
   /// Returns the number of elements in a vector LLT. Must only be called on
@@ -177,12 +335,18 @@ class LLT {
   /// if the LLT is not a vector type.
   constexpr bool isFixedVector() const { return isVector() && !isScalable(); }
 
+  constexpr bool isFixedVector(unsigned NumElements,
+                               unsigned ScalarSize) const {
+    return isFixedVector() && getNumElements() == NumElements &&
+           getScalarSizeInBits() == ScalarSize;
+  }
+
   /// Returns true if the LLT is a scalable vector. Returns false otherwise,
   /// even if the LLT is not a vector type.
   constexpr bool isScalableVector() const { return isVector() && isScalable(); }
 
   constexpr ElementCount getElementCount() const {
-    assert(IsVector && "cannot get number of elements on scalar/aggregate");
+    assert(isVector() && "cannot get number of elements on scalar/aggregate");
     return ElementCount::get(getFieldValue(VectorElementsFieldInfo),
                              isScalable());
   }
@@ -203,10 +367,16 @@ class LLT {
     return {(BaseSize.getKnownMinValue() + 7) / 8, BaseSize.isScalable()};
   }
 
-  constexpr LLT getScalarType() const {
-    return isVector() ? getElementType() : *this;
+  LLT getScalarType() const { return isVector() ? getElementType() : *this; }
+
+  constexpr FpSemantics getFpSemantics() const {
+    assert((isFloat() || isFloatVector()) &&
+           "cannot get FP info for non float type");
+    return FpSemantics(getFieldValue(FpSemanticFieldInfo));
   }
 
+  constexpr Kind getKind() const { return Info; }
+
   /// Returns a vector with the same number of elements but the new element
   /// type. Must only be called on vector types.
   constexpr LLT changeVectorElementType(LLT NewEltTy) const {
@@ -221,17 +391,26 @@ class LLT {
 
   /// If this type is a vector, return a vector with the same number of elements
   /// but the new element size. Otherwise, return the new element type. Invalid
-  /// for pointer types. For pointer types, use changeElementType.
-  constexpr LLT changeElementSize(unsigned NewEltSize) const {
+  /// for pointer types. For these, use changeElementType.
+  LLT changeElementSize(unsigned NewEltSize) const {
     assert(!isPointerOrPointerVector() &&
            "invalid to directly change element size for pointers");
-    return isVector() ? LLT::vector(getElementCount(), NewEltSize)
-                      : LLT::scalar(NewEltSize);
+    if (isVector())
+      return LLT::vector(getElementCount(),
+                         getElementType().changeElementSize(NewEltSize));
+
+    if (isInteger())
+      return LLT::integer(NewEltSize);
+
+    if (isFloatIEEE())
+      return LLT::floatIEEE(NewEltSize);
+
+    return LLT::scalar(NewEltSize);
   }
 
   /// Return a vector with the same element type and the new element count. Must
   /// be called on vector types.
-  constexpr LLT changeVectorElementCount(ElementCount EC) const {
+  LLT changeVectorElementCount(ElementCount EC) const {
     assert(isVector() &&
            "cannot change vector element count of non-vector type");
     return LLT::vector(EC, getElementType());
@@ -239,17 +418,21 @@ class LLT {
 
   /// Return a vector or scalar with the same element type and the new element
   /// count.
-  constexpr LLT changeElementCount(ElementCount EC) const {
+  LLT changeElementCount(ElementCount EC) const {
     return LLT::scalarOrVector(EC, getScalarType());
   }
 
+  LLT changeElementCount(unsigned NumElements) const {
+    return changeElementCount(ElementCount::getFixed(NumElements));
+  }
+
   /// Return a type that is \p Factor times smaller. Reduces the number of
   /// elements if this is a vector, or the bitwidth for scalar/pointers. Does
   /// not attempt to handle cases that aren't evenly divisible.
-  constexpr LLT divide(int Factor) const {
+  LLT divide(int Factor) const {
     assert(Factor != 1);
-    assert((!isScalar() || getScalarSizeInBits() != 0) &&
-           "cannot divide scalar of size zero");
+    assert((!isScalar() || getScalarSizeInBits() != 0) && !isFloat() &&
+           "cannot divide scalar of size zero and floats");
     if (isVector()) {
       assert(getElementCount().isKnownMultipleOf(Factor));
       return scalarOrVector(getElementCount().divideCoefficientBy(Factor),
@@ -257,13 +440,16 @@ class LLT {
     }
 
     assert(getScalarSizeInBits() % Factor == 0);
+    if (isInteger())
+      return integer(getScalarSizeInBits() / Factor);
+
     return scalar(getScalarSizeInBits() / Factor);
   }
 
   /// Produce a vector type that is \p Factor times bigger, preserving the
   /// element type. For a scalar or pointer, this will produce a new vector with
   /// \p Factor elements.
-  constexpr LLT multiplyElements(int Factor) const {
+  LLT multiplyElements(int Factor) const {
     if (isVector()) {
       return scalarOrVector(getElementCount().multiplyCoefficientBy(Factor),
                             getElementType());
@@ -289,12 +475,28 @@ class LLT {
   }
 
   /// Returns the vector's element type. Only valid for vector types.
-  constexpr LLT getElementType() const {
+  LLT getElementType() const {
     assert(isVector() && "cannot get element type of scalar/aggregate");
-    if (IsPointer)
+    if (isPointerVector())
       return pointer(getAddressSpace(), getScalarSizeInBits());
-    else
-      return scalar(getScalarSizeInBits());
+
+    if (isFloatVector())
+      return floatingPoint(getFpSemantics());
+
+    if (isIntegerVector())
+      return integer(getScalarSizeInBits());
+
+    return scalar(getScalarSizeInBits());
+  }
+
+  LLT changeToInteger() const {
+    if (isPointer() || isPointerVector())
+      return *this;
+
+    if (isVector())
+      return vector(getElementCount(), LLT::integer(getScalarSizeInBits()));
+
+    return integer(getSizeInBits());
   }
 
   LLVM_ABI void print(raw_ostream &OS) const;
@@ -303,79 +505,99 @@ class LLT {
   LLVM_DUMP_METHOD void dump() const;
 #endif
 
-  constexpr bool operator==(const LLT &RHS) const {
-    return IsPointer == RHS.IsPointer && IsVector == RHS.IsVector &&
-           IsScalar == RHS.IsScalar && RHS.RawData == RawData;
+  bool operator==(const LLT &RHS) const {
+    if (isAnyScalar() || RHS.isAnyScalar())
+      return isScalar() == RHS.isScalar() &&
+             getScalarSizeInBits() == RHS.getScalarSizeInBits();
+
+    if (isVector() && RHS.isVector())
+      return getElementType() == RHS.getElementType() &&
+             getElementCount() == RHS.getElementCount();
+
+    return Info == RHS.Info && RawData == RHS.RawData;
   }
 
-  constexpr bool operator!=(const LLT &RHS) const { return !(*this == RHS); }
+  bool operator!=(const LLT &RHS) const { return !(*this == RHS); }
 
   friend struct DenseMapInfo<LLT>;
   friend class GISelInstProfileBuilder;
 
 private:
   /// LLT is packed into 64 bits as follows:
-  /// isScalar : 1
-  /// isPointer : 1
-  /// isVector  : 1
-  /// with 61 bits remaining for Kind-specific data, packed in bitfields
-  /// as described below. As there isn't a simple portable way to pack bits
-  /// into bitfields, here the different fields in the packed structure is
+  /// RawData : 60
+  /// Info : 4
+  /// RawData holds the Kind-specific data, packed in
+  /// bitfields as described below. As there isn't a simple portable way to pack
+  /// bits into bitfields, here the different fields in the packed structure is
   /// described in static const *Field variables. Each of these variables
   /// is a 2-element array, with the first element describing the bitfield size
   /// and the second element describing the bitfield offset.
   ///
-  /// +--------+---------+--------+----------+----------------------+
-  /// |isScalar|isPointer|isVector| RawData  |Notes                 |
-  /// +--------+---------+--------+----------+----------------------+
-  /// |   0    |    0    |   0    |    0     |Invalid               |
-  /// +--------+---------+--------+----------+----------------------+
-  /// |   0    |    0    |   1    |    0     |Tombstone Key         |
-  /// +--------+---------+--------+----------+----------------------+
-  /// |   0    |    1    |   0    |    0     |Empty Key             |
-  /// +--------+---------+--------+----------+----------------------+
-  /// |   1    |    0    |   0    |    0     |Token                 |
-  /// +--------+---------+--------+----------+----------------------+
-  /// |   1    |    0    |   0    | non-zero |Scalar                |
-  /// +--------+---------+--------+----------+----------------------+
-  /// |   0    |    1    |   0    | non-zero |Pointer               |
-  /// +--------+---------+--------+----------+----------------------+
-  /// |   0    |    0    |   1    | non-zero |Vector of non-pointer |
-  /// +--------+---------+--------+----------+----------------------+
-  /// |   0    |    1    |   1    | non-zero |Vector of pointer     |
-  /// +--------+---------+--------+----------+----------------------+
-  ///
-  /// Everything else is reserved.
-  typedef int BitFieldInfo[2];
-  ///
-  /// This is how the bitfields are packed per Kind:
+  /*
+                                --- LLT ---
+
+   63       56       47       39       31       23       15       7      0
+   |        |        |        |        |        |        |        |      |
+  |xxxxxxxx|xxxxxxxx|xxxxxxxx|xxxxxxxx|xxxxxxxx|xxxxxxxx|xxxxxxxx|xxxxxxxx|
+   %%%%                                                                     (1)
+       .... ........ ........ ........ ....                                 (2)
+       **** ******** ****                                                   (3)
+                         ~~~~ ~~~~~~~~ ~~~~~~~~ ~~~~                        (4)
+                                           #### ####                        (5)
+                                                    ^^^^ ^^^^^^^^ ^^^^      (6)
+                                                                         @  (7)
+
+  (1) Kind:                [63:60]
+  (2) ScalarSize:          [59:28]
+  (3) PointerSize:         [59:44]
+  (4) PointerAddressSpace: [43:20]
+  (5) FpSemantics:         [27:20]
+  (6) VectorElements:      [19:4]
+  (7) VectorScalable:      [0:0]
+
+  */
+
+  /// This is how the LLT is packed per Kind:
   /// * Invalid:
-  ///   gets encoded as RawData == 0, as that is an invalid encoding, since for
-  ///   valid encodings, SizeInBits/SizeOfElement must be larger than 0.
+  ///   Info: [63:60] = 0
+  ///   RawData: [59:0] = 0;
+  ///
   /// * Non-pointer scalar (isPointer == 0 && isVector == 0):
-  ///   SizeInBits: 32;
-  static constexpr BitFieldInfo ScalarSizeFieldInfo{32, 29};
+  ///   Info: [63:60];
+  ///   SizeOfElement: [59:28];
+  ///   FpSemantics: [27:20];
+  ///
   /// * Pointer (isPointer == 1 && isVector == 0):
-  ///   SizeInBits: 16;
-  ///   AddressSpace: 24;
-  static constexpr BitFieldInfo PointerSizeFieldInfo{16, 45};
-  static constexpr BitFieldInfo PointerAddressSpaceFieldInfo{24, 21};
+  ///   Info: [63:60];
+  ///   SizeInBits: [59:44];
+  ///   AddressSpace: [43:20];
+  ///
   /// * Vector-of-non-pointer (isPointer == 0 && isVector == 1):
-  ///   NumElements: 16;
-  ///   SizeOfElement: 32;
-  ///   Scalable: 1;
-  static constexpr BitFieldInfo VectorElementsFieldInfo{16, 5};
-  static constexpr BitFieldInfo VectorScalableFieldInfo{1, 0};
+  ///   Info: [63:60]
+  ///   SizeOfElement: [59:28];
+  ///   FpSemantics: [27:20];
+  ///   VectorElements: [19:4];
+  ///   Scalable: [0:0];
+  ///
   /// * Vector-of-pointer (isPointer == 1 && isVector == 1):
-  ///   NumElements: 16;
-  ///   SizeOfElement: 16;
-  ///   AddressSpace: 24;
-  ///   Scalable: 1;
+  ///   Info: [63:60];
+  ///   SizeInBits: [59:44];
+  ///   AddressSpace: [43:20];
+  ///   VectorElements: [19:4];
+  ///   Scalable: [0:0];
+
+  /// BitFieldInfo: {Size, Offset}
+  typedef int BitFieldInfo[2];
+  static_assert(bit_width_constexpr((uint32_t)APFloat::S_MaxSemantics) <= 8);
+  static constexpr BitFieldInfo VectorScalableFieldInfo{1, 0};
+  static constexpr BitFieldInfo VectorElementsFieldInfo{16, 4};
+  static constexpr BitFieldInfo FpSemanticFieldInfo{8, 20};
+  static constexpr BitFieldInfo PointerAddressSpaceFieldInfo{24, 20};
+  static constexpr BitFieldInfo ScalarSizeFieldInfo{32, 28};
+  static constexpr BitFieldInfo PointerSizeFieldInfo{16, 44};
 
-  uint64_t IsScalar : 1;
-  uint64_t IsPointer : 1;
-  uint64_t IsVector : 1;
-  uint64_t RawData : 61;
+  uint64_t RawData : 60;
+  Kind Info : 4;
 
   static constexpr uint64_t getMask(const BitFieldInfo FieldInfo) {
     const int FieldSizeInBits = FieldInfo[0];
@@ -395,21 +617,51 @@ class LLT {
     return getMask(FieldInfo) & (RawData >> FieldInfo[1]);
   }
 
-  constexpr void init(bool IsPointer, bool IsVector, bool IsScalar,
-                      ElementCount EC, uint64_t SizeInBits,
+  // Init for any-scalar and integer types, scalar or vector
+  constexpr void init(Kind Info, ElementCount EC, uint64_t SizeInBits) {
+    assert(SizeInBits <= std::numeric_limits<unsigned>::max() &&
+           "Not enough bits in LLT to represent size");
+    assert((Info == Kind::ANY_SCALAR || Info == Kind::INTEGER ||
+            Info == Kind::VECTOR_ANY || Info == Kind::VECTOR_INTEGER) &&
+           "Called initializer for wrong LLT Kind");
+    this->Info = Info;
+    RawData = maskAndShift(SizeInBits, ScalarSizeFieldInfo);
+
+    if (Info == Kind::VECTOR_ANY || Info == Kind::VECTOR_INTEGER) {
+      RawData = maskAndShift(SizeInBits, ScalarSizeFieldInfo) |
+                maskAndShift(EC.getKnownMinValue(), VectorElementsFieldInfo) |
+                maskAndShift(EC.isScalable() ? 1 : 0, VectorScalableFieldInfo);
+    }
+  }
+
+  // Init for pointer and pointer-vector types
+  constexpr void init(Kind Info, ElementCount EC, uint64_t SizeInBits,
                       unsigned AddressSpace) {
     assert(SizeInBits <= std::numeric_limits<unsigned>::max() &&
            "Not enough bits in LLT to represent size");
-    this->IsPointer = IsPointer;
-    this->IsVector = IsVector;
-    this->IsScalar = IsScalar;
-    if (IsPointer) {
-      RawData = maskAndShift(SizeInBits, PointerSizeFieldInfo) |
-                maskAndShift(AddressSpace, PointerAddressSpaceFieldInfo);
-    } else {
-      RawData = maskAndShift(SizeInBits, ScalarSizeFieldInfo);
+    assert((Info == Kind::POINTER || Info == Kind::VECTOR_POINTER) &&
+           "Called initializer for wrong LLT Kind");
+    this->Info = Info;
+    RawData = maskAndShift(SizeInBits, PointerSizeFieldInfo) |
+              maskAndShift(AddressSpace, PointerAddressSpaceFieldInfo);
+
+    if (Info == Kind::VECTOR_POINTER) {
+      RawData |= maskAndShift(EC.getKnownMinValue(), VectorElementsFieldInfo) |
+                 maskAndShift(EC.isScalable() ? 1 : 0, VectorScalableFieldInfo);
     }
-    if (IsVector) {
+  }
+
+  constexpr void init(Kind Info, ElementCount EC, uint64_t SizeInBits,
+                      FpSemantics Sem) {
+    assert(SizeInBits <= std::numeric_limits<unsigned>::max() &&
+           "Not enough bits in LLT to represent size");
+    assert((Info == Kind::FLOAT || Info == Kind::VECTOR_FLOAT) &&
+           "Called initializer for wrong LLT Kind");
+    this->Info = Info;
+    RawData = maskAndShift(SizeInBits, ScalarSizeFieldInfo) |
+              maskAndShift((uint64_t)Sem, FpSemanticFieldInfo);
+
+    if (Info == Kind::VECTOR_FLOAT) {
       RawData |= maskAndShift(EC.getKnownMinValue(), VectorElementsFieldInfo) |
                  maskAndShift(EC.isScalable() ? 1 : 0, VectorScalableFieldInfo);
     }
@@ -417,36 +669,39 @@ class LLT {
 
 public:
   constexpr uint64_t getUniqueRAWLLTData() const {
-    return ((uint64_t)RawData) << 3 | ((uint64_t)IsScalar) << 2 |
-           ((uint64_t)IsPointer) << 1 | ((uint64_t)IsVector);
+    return ((uint64_t)RawData) | ((uint64_t)Info) << 60;
   }
+
+  static bool getUseExtended() { return ExtendedLLT; }
+  static void setUseExtended(bool Enable) { ExtendedLLT = Enable; }
+
+private:
+  static bool ExtendedLLT;
 };
 
-inline raw_ostream& operator<<(raw_ostream &OS, const LLT &Ty) {
+inline raw_ostream &operator<<(raw_ostream &OS, const LLT &Ty) {
   Ty.print(OS);
   return OS;
 }
 
-template<> struct DenseMapInfo<LLT> {
+template <> struct DenseMapInfo<LLT> {
   static inline LLT getEmptyKey() {
     LLT Invalid;
-    Invalid.IsPointer = true;
+    Invalid.Info = LLT::Kind::POINTER;
     return Invalid;
   }
   static inline LLT getTombstoneKey() {
     LLT Invalid;
-    Invalid.IsVector = true;
+    Invalid.Info = LLT::Kind::VECTOR_ANY;
     return Invalid;
   }
   static inline unsigned getHashValue(const LLT &Ty) {
     uint64_t Val = Ty.getUniqueRAWLLTData();
     return DenseMapInfo<uint64_t>::getHashValue(Val);
   }
-  static bool isEqual(const LLT &LHS, const LLT &RHS) {
-    return LHS == RHS;
-  }
+  static bool isEqual(const LLT &LHS, const LLT &RHS) { return LHS == RHS; }
 };
 
-}
+} // namespace llvm
 
 #endif // LLVM_CODEGEN_LOWLEVELTYPE_H
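
A hedged usage sketch of the new kind-aware API above (assumed client code,
not part of this commit). With extended LLTs enabled, two 16-bit floats with
different semantics no longer compare equal, while a kind-less s16 still
compares equal to both by size:

    #include "llvm/CodeGenTypes/LowLevelType.h"
    #include <cassert>
    using namespace llvm;

    void demo() {
      LLT::setUseExtended(true);    // opt in to distinct kinds
      LLT Half = LLT::float16();    // IEEE half
      LLT BF16 = LLT::bfloat16();   // bfloat16
      assert(Half.getSizeInBits() == 16 && BF16.getSizeInBits() == 16);
      assert(Half != BF16);                  // distinguished by FpSemantics
      assert(BF16.isBFloat16() && !Half.isBFloat16());
      assert(LLT::scalar(16) == Half);       // ANY_SCALAR matches by size
      assert(LLT::scalar(16) == BF16);
    }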

diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
index bea2e0046bf44..4775dd6c8be31 100644
--- a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -335,13 +335,24 @@ mergeVectorRegsToResultRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
   if (LCMTy == LLTy) {
     // Common case where no padding is needed.
     assert(DstRegs.size() == 1);
-    return B.buildConcatVectors(DstRegs[0], SrcRegs);
+
+    SmallVector<Register, 8> ConcatRegs(SrcRegs.size());
+    llvm::copy(SrcRegs, ConcatRegs.begin());
+
+    if (LLTy.getScalarType() != PartLLT.getScalarType())
+      for (size_t I = 0, E = SrcRegs.size(); I != E; ++I) {
+        auto BitcastDst =
+            MRI.getType(SrcRegs[I]).changeElementType(LLTy.getScalarType());
+        ConcatRegs[I] = B.buildBitcast(BitcastDst, SrcRegs[I]).getReg(0);
+      }
+
+    return B.buildConcatVectors(DstRegs[0], ConcatRegs);
   }
 
   // We need to create an unmerge to the result registers, which may require
   // widening the original value.
   Register UnmergeSrcReg;
-  if (LCMTy != PartLLT) {
+  if (LCMTy.getSizeInBits() != PartLLT.getSizeInBits()) {
     assert(DstRegs.size() == 1);
     return B.buildDeleteTrailingVectorElements(
         DstRegs[0], B.buildMergeLikeInstr(LCMTy, SrcRegs));
@@ -352,15 +363,18 @@ mergeVectorRegsToResultRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
     UnmergeSrcReg = SrcRegs[0];
   }
 
-  int NumDst = LCMTy.getSizeInBits() / LLTy.getSizeInBits();
+  size_t NumDst = LCMTy.getSizeInBits() / LLTy.getSizeInBits();
 
   SmallVector<Register, 8> PadDstRegs(NumDst);
   llvm::copy(DstRegs, PadDstRegs.begin());
 
   // Create the excess dead defs for the unmerge.
-  for (int I = DstRegs.size(); I != NumDst; ++I)
+  for (size_t I = DstRegs.size(); I != NumDst; ++I)
     PadDstRegs[I] = MRI.createGenericVirtualRegister(LLTy);
 
+  if (PartLLT != LCMTy)
+    UnmergeSrcReg = B.buildBitcast(LCMTy, UnmergeSrcReg).getReg(0);
+
   if (PadDstRegs.size() == 1)
     return B.buildDeleteTrailingVectorElements(DstRegs[0], UnmergeSrcReg);
   return B.buildUnmerge(PadDstRegs, UnmergeSrcReg);
@@ -448,7 +462,7 @@ void CallLowering::buildCopyFromRegs(MachineIRBuilder &B,
       PartLLT = NewTy;
     }
 
-    if (LLTy.getScalarType() == PartLLT.getElementType()) {
+    if (LLTy.getScalarSizeInBits() == PartLLT.getScalarSizeInBits()) {
       mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
     } else {
       unsigned I = 0;
@@ -794,8 +808,8 @@ bool CallLowering::handleAssignments(ValueHandler &Handler,
     const MVT ValVT = VA.getValVT();
     const MVT LocVT = VA.getLocVT();
 
-    const LLT LocTy(LocVT);
-    const LLT ValTy(ValVT);
+    const LLT LocTy = getLLTForMVT(LocVT);
+    const LLT ValTy = getLLTForMVT(ValVT);
     const LLT NewLLT = Handler.isIncomingArgumentHandler() ? LocTy : ValTy;
     const EVT OrigVT = TLI->getValueType(DL, Args[i].Ty);
     // Use the EVT here to strip pointerness.
@@ -1299,7 +1313,7 @@ void CallLowering::ValueHandler::copyArgumentMemory(
       MemSize, DstAlign);
 
   const LLT PtrTy = MRI.getType(DstPtr);
-  const LLT SizeTy = LLT::scalar(PtrTy.getSizeInBits());
+  const LLT SizeTy = LLT::integer(PtrTy.getSizeInBits());
 
   auto SizeConst = MIRBuilder.buildConstant(SizeTy, MemSize);
   MIRBuilder.buildMemCpy(DstPtr, SrcPtr, SizeConst, *DstMMO, *SrcMMO);
@@ -1401,7 +1415,7 @@ void CallLowering::IncomingValueHandler::assignValueToReg(
     Register ValVReg, Register PhysReg, const CCValAssign &VA,
     ISD::ArgFlagsTy Flags) {
   const MVT LocVT = VA.getLocVT();
-  const LLT LocTy(LocVT);
+  const LLT LocTy = getLLTForMVT(LocVT);
   const LLT RegTy = MRI.getType(ValVReg);
 
   if (isCopyCompatibleType(RegTy, LocTy)) {

diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 4d92754ae9b48..177170575fe07 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -2358,9 +2358,22 @@ void CombinerHelper::applyCombineUnmergeConstant(
   assert((MI.getNumOperands() - 1 == Csts.size()) &&
          "Not enough operands to replace all defs");
   unsigned NumElems = MI.getNumOperands() - 1;
-  for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
-    Register DstReg = MI.getOperand(Idx).getReg();
-    Builder.buildConstant(DstReg, Csts[Idx]);
+
+  Register SrcReg = MI.getOperand(NumElems).getReg();
+
+  if (MRI.getType(SrcReg).isFloat()) {
+    APFloat Val(getFltSemanticForLLT(MRI.getType(MI.getOperand(0).getReg())));
+
+    for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
+      Register DstReg = MI.getOperand(Idx).getReg();
+      Val.convertFromAPInt(Csts[Idx], false, detail::rmTowardZero);
+      Builder.buildFConstant(DstReg, Val);
+    }
+  } else {
+    for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
+      Register DstReg = MI.getOperand(Idx).getReg();
+      Builder.buildConstant(DstReg, Csts[Idx]);
+    }
   }
 
   MI.eraseFromParent();

diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 18c90ceee3867..44f3c7f1e0dfd 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -306,23 +306,9 @@ void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
   MachinePreds[Edge].push_back(NewPred);
 }
 
-static bool targetSupportsBF16Type(const MachineFunction *MF) {
-  return MF->getTarget().getTargetTriple().isSPIRV();
-}
-
-static bool containsBF16Type(const User &U) {
-  // BF16 cannot currently be represented by LLT, to avoid miscompiles we
-  // prevent any instructions using them. FIXME: This can be removed once LLT
-  // supports bfloat.
-  return U.getType()->getScalarType()->isBFloatTy() ||
-         any_of(U.operands(), [](Value *V) {
-           return V->getType()->getScalarType()->isBFloatTy();
-         });
-}
-
 bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                      MachineIRBuilder &MIRBuilder) {
-  if (containsBF16Type(U) && !targetSupportsBF16Type(MF))
+  if (!mayTranslateUserTypes(U))
     return false;
 
   // Get or create a virtual register for each value.
@@ -344,7 +330,7 @@ bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
 
 bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
-  if (containsBF16Type(U) && !targetSupportsBF16Type(MF))
+  if (!mayTranslateUserTypes(U))
     return false;
 
   Register Op0 = getOrCreateVReg(*U.getOperand(0));
@@ -364,7 +350,7 @@ bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
 
 bool IRTranslator::translateCompare(const User &U,
                                     MachineIRBuilder &MIRBuilder) {
-  if (containsBF16Type(U) && !targetSupportsBF16Type(MF))
+  if (!mayTranslateUserTypes(U))
     return false;
 
   auto *CI = cast<CmpInst>(&U);
@@ -918,7 +904,7 @@ bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
   auto Cst = getOrCreateVReg(
       *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
   Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
-  auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);
+  auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::integer(1), Sub, Cst);
 
   auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);
 
@@ -949,7 +935,7 @@ void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
     return;
   }
 
-  const LLT i1Ty = LLT::scalar(1);
+  const LLT i1Ty = LLT::integer(1);
   // Build the compare.
   if (!CB.CmpMHS) {
     const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS);
@@ -1161,7 +1147,7 @@ void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
   if (!B.FallthroughUnreachable) {
     // Conditional branch to the default block.
     auto RangeCst = MIB.buildConstant(SwitchOpTy, B.Range);
-    auto RangeCmp = MIB.buildICmp(CmpInst::Predicate::ICMP_UGT, LLT::scalar(1),
+    auto RangeCmp = MIB.buildICmp(CmpInst::Predicate::ICMP_UGT, LLT::integer(1),
                                   RangeSub, RangeCst);
     MIB.buildBrCond(RangeCmp, *B.Default);
   }
@@ -1187,15 +1173,16 @@ void IRTranslator::emitBitTestCase(SwitchCG::BitTestBlock &BB,
     // would need to be to shift a 1 bit in that position.
     auto MaskTrailingZeros =
         MIB.buildConstant(SwitchTy, llvm::countr_zero(B.Mask));
-    Cmp =
-        MIB.buildICmp(ICmpInst::ICMP_EQ, LLT::scalar(1), Reg, MaskTrailingZeros)
-            .getReg(0);
+    Cmp = MIB.buildICmp(ICmpInst::ICMP_EQ, LLT::integer(1), Reg,
+                        MaskTrailingZeros)
+              .getReg(0);
   } else if (PopCount == BB.Range) {
     // There is only one zero bit in the range, test for it directly.
     auto MaskTrailingOnes =
         MIB.buildConstant(SwitchTy, llvm::countr_one(B.Mask));
-    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Reg, MaskTrailingOnes)
-              .getReg(0);
+    Cmp =
+        MIB.buildICmp(CmpInst::ICMP_NE, LLT::integer(1), Reg, MaskTrailingOnes)
+            .getReg(0);
   } else {
     // Make desired shift.
     auto CstOne = MIB.buildConstant(SwitchTy, 1);
@@ -1205,7 +1192,7 @@ void IRTranslator::emitBitTestCase(SwitchCG::BitTestBlock &BB,
     auto CstMask = MIB.buildConstant(SwitchTy, B.Mask);
     auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);
     auto CstZero = MIB.buildConstant(SwitchTy, 0);
-    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), AndOp, CstZero)
+    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::integer(1), AndOp, CstZero)
               .getReg(0);
   }
 
@@ -1590,7 +1577,7 @@ bool IRTranslator::translateBitCast(const User &U,
 
 bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                  MachineIRBuilder &MIRBuilder) {
-  if (containsBF16Type(U) && !targetSupportsBF16Type(MF))
+  if (!mayTranslateUserTypes(U))
     return false;
 
   uint32_t Flags = 0;
@@ -1752,7 +1739,7 @@ bool IRTranslator::translateMemFunc(const CallInst &CI,
     SrcRegs.push_back(SrcReg);
   }
 
-  LLT SizeTy = LLT::scalar(MinPtrSize);
+  LLT SizeTy = LLT::integer(MinPtrSize);
 
   // The size operand should be the minimum of the pointer sizes.
   Register &SizeOpReg = SrcRegs[SrcRegs.size() - 1];
@@ -2705,7 +2692,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
 
 bool IRTranslator::translateInlineAsm(const CallBase &CB,
                                       MachineIRBuilder &MIRBuilder) {
-  if (containsBF16Type(CB) && !targetSupportsBF16Type(MF))
+  if (!mayTranslateUserTypes(CB))
     return false;
 
   const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();
@@ -2796,7 +2783,7 @@ bool IRTranslator::translateCallBase(const CallBase &CB,
 }
 
 bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
-  if (containsBF16Type(U) && !targetSupportsBF16Type(MF))
+  if (!mayTranslateUserTypes(U))
     return false;
 
   const CallInst &CI = cast<CallInst>(U);
@@ -3067,7 +3054,7 @@ bool IRTranslator::translateInvoke(const User &U,
 /// intrinsics such as amdgcn.kill.
 bool IRTranslator::translateCallBr(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
-  if (containsBF16Type(U))
+  if (!mayTranslateUserTypes(U))
     return false; // see translateCall
 
   const CallBrInst &I = cast<CallBrInst>(U);
@@ -3284,7 +3271,8 @@ bool IRTranslator::translateInsertElement(const User &U,
   if (!Idx)
     Idx = getOrCreateVReg(*U.getOperand(2));
   if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
-    const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
+    const LLT VecIdxTy =
+        MRI->getType(Idx).changeElementSize(PreferredVecIdxWidth);
     Idx = MIRBuilder.buildZExtOrTrunc(VecIdxTy, Idx).getReg(0);
   }
   MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
@@ -3365,7 +3353,8 @@ bool IRTranslator::translateExtractElement(const User &U,
   if (!Idx)
     Idx = getOrCreateVReg(*U.getOperand(1));
   if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
-    const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
+    const LLT VecIdxTy =
+        MRI->getType(Idx).changeElementSize(PreferredVecIdxWidth);
     Idx = MIRBuilder.buildZExtOrTrunc(VecIdxTy, Idx).getReg(0);
   }
   MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
@@ -3533,7 +3522,7 @@ bool IRTranslator::translateAtomicCmpXchg(const User &U,
 
 bool IRTranslator::translateAtomicRMW(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
-  if (containsBF16Type(U) && !targetSupportsBF16Type(MF))
+  if (!mayTranslateUserTypes(U))
     return false;
 
   const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
@@ -3880,6 +3869,22 @@ bool IRTranslator::translate(const Constant &C, Register Reg) {
   return true;
 }
 
+bool IRTranslator::mayTranslateUserTypes(const User &U) const {
+  const TargetMachine &TM = TLI->getTargetMachine();
+  if (LLT::getUseExtended())
+    return true;
+
+  // BF16 cannot currently be represented by the default LLT. To avoid
+  // miscompiles, we reject any instruction using it on targets that do not
+  // explicitly opt in via LLT::setUseExtended(true).
+  // The SPIR-V target is an exception.
+  return TM.getTargetTriple().isSPIRV() ||
+         (!U.getType()->getScalarType()->isBFloatTy() &&
+          !any_of(U.operands(), [](Value *V) {
+            return V->getType()->getScalarType()->isBFloatTy();
+          }));
+}
+
 bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
                                       MachineBasicBlock &MBB) {
   for (auto &BTB : SL->BitTestCases) {
@@ -4080,7 +4085,7 @@ bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
 
   // Perform the comparison.
   auto Cmp =
-      CurBuilder->buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Guard, GuardVal);
+      CurBuilder->buildICmp(CmpInst::ICMP_NE, LLT::integer(1), Guard, GuardVal);
   // If the guard/stackslot do not equal, branch to failure MBB.
   CurBuilder->buildBrCond(Cmp, *SPD.getFailureMBB());
   // Otherwise branch to success MBB.
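
A sketch of the opt-in this hook checks for (hypothetical backend code; the
placement is assumed). Once a target enables extended LLTs, bf16 users are
translated like any other type instead of falling back; SPIR-V keeps its
existing unconditional exception:

    // Somewhere in the initialization of a backend that models bfloat
    // with LLT::bfloat16():
    LLT::setUseExtended(true);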

diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizeMutations.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizeMutations.cpp
index ded4df4edc14c..f7ead3b41ea49 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizeMutations.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizeMutations.cpp
@@ -67,8 +67,17 @@ LegalizeMutation LegalizeMutations::changeElementSizeTo(unsigned TypeIdx,
   return [=](const LegalityQuery &Query) {
     const LLT OldTy = Query.Types[TypeIdx];
     const LLT NewTy = Query.Types[FromTypeIdx];
-    const LLT NewEltTy = LLT::scalar(NewTy.getScalarSizeInBits());
-    return std::make_pair(TypeIdx, OldTy.changeElementType(NewEltTy));
+    return std::make_pair(TypeIdx,
+                          OldTy.changeElementSize(NewTy.getScalarSizeInBits()));
+  };
+}
+
+LegalizeMutation LegalizeMutations::changeElementSizeTo(unsigned TypeIdx,
+                                                        LLT NewTy) {
+  return [=](const LegalityQuery &Query) {
+    const LLT OldTy = Query.Types[TypeIdx];
+    return std::make_pair(TypeIdx,
+                          OldTy.changeElementSize(NewTy.getScalarSizeInBits()));
   };
 }
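
A sketch of the new overload in a rule set (hypothetical target code;
scalarNarrowerThan and widenScalarIf are the existing LegalityPredicates and
LegalizeRuleSet helpers). Unlike the two-index overload above it, this one
takes the target size from a fixed LLT rather than from another type index:

    getActionDefinitionsBuilder(TargetOpcode::G_AND)
        .widenScalarIf(
            LegalityPredicates::scalarNarrowerThan(0, 32),
            LegalizeMutations::changeElementSizeTo(0, LLT::integer(32)));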
 

diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 95ec652860ead..ac3d1f62ce9cc 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -1182,7 +1182,7 @@ LegalizerHelper::createFCMPLibcall(MachineInstr &MI,
                                 const CmpInst::Predicate ICmpPred,
                                 const DstOp &Res) -> Register {
     // FCMP libcall always returns an i32, and needs an ICMP with #0.
-    constexpr LLT TempLLT = LLT::scalar(32);
+    LLT TempLLT = LLT::integer(32);
     Register Temp = MRI.createGenericVirtualRegister(TempLLT);
     // Generate libcall, holding result in Temp
     const auto Status = createLibcall(
@@ -3302,10 +3302,8 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
       LLT VecTy = MRI.getType(VecReg);
       Observer.changingInstr(MI);
 
-      widenScalarSrc(
-          MI,
-          VecTy.changeVectorElementType(LLT::scalar(WideTy.getSizeInBits())), 1,
-          TargetOpcode::G_ANYEXT);
+      widenScalarSrc(MI, LLT::vector(VecTy.getElementCount(), WideTy), 1,
+                     TargetOpcode::G_ANYEXT);
 
       widenScalarDst(MI, WideTy, 0);
       Observer.changedInstr(MI);
@@ -3959,7 +3957,7 @@ LegalizerHelper::bitcastConcatVector(MachineInstr &MI, unsigned TypeIdx,
 
   // Check if bitcast is Legal
   auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
-  LLT SrcScalTy = LLT::scalar(SrcTy.getSizeInBits());
+  LLT SrcScalTy = CastTy.getScalarType();
 
   // Check if the build vector is Legal
   if (!LI.isLegal({TargetOpcode::G_BUILD_VECTOR, {CastTy, SrcScalTy}})) {
@@ -4157,6 +4155,8 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerLoad(GAnyLoad &LoadMI) {
   LLT MemTy = MMO.getMemoryType();
   MachineFunction &MF = MIRBuilder.getMF();
 
+  LLT EltTy = MemTy.getScalarType();
+
   unsigned MemSizeInBits = MemTy.getSizeInBits();
   unsigned MemStoreSizeInBits = 8 * MemTy.getSizeInBytes();
 
@@ -4166,7 +4166,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerLoad(GAnyLoad &LoadMI) {
 
     // Promote to a byte-sized load if not loading an integral number of
     // bytes.  For example, promote EXTLOAD:i20 -> EXTLOAD:i24.
-    LLT WideMemTy = LLT::scalar(MemStoreSizeInBits);
+    LLT WideMemTy = EltTy.changeElementSize(MemStoreSizeInBits);
     MachineMemOperand *NewMMO =
         MF.getMachineMemOperand(&MMO, MMO.getPointerInfo(), WideMemTy);
 
@@ -4268,12 +4268,21 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerLoad(GAnyLoad &LoadMI) {
 
   LLT PtrTy = MRI.getType(PtrReg);
   unsigned AnyExtSize = PowerOf2Ceil(DstTy.getSizeInBits());
-  LLT AnyExtTy = LLT::scalar(AnyExtSize);
+
+  LLT AnyExtTy;
+  LLT OffsetCstRes;
+  if (EltTy.isPointer()) {
+    AnyExtTy = LLT::scalar(AnyExtSize);
+    OffsetCstRes = LLT::scalar(PtrTy.getSizeInBits());
+  } else {
+    AnyExtTy = EltTy.changeElementSize(AnyExtSize);
+    OffsetCstRes = EltTy.changeElementSize(PtrTy.getSizeInBits());
+  }
+
   auto LargeLoad = MIRBuilder.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, AnyExtTy,
                                              PtrReg, *LargeMMO);
 
-  auto OffsetCst = MIRBuilder.buildConstant(LLT::scalar(PtrTy.getSizeInBits()),
-                                            LargeSplitSize / 8);
+  auto OffsetCst = MIRBuilder.buildConstant(OffsetCstRes, LargeSplitSize / 8);
   Register PtrAddReg = MRI.createGenericVirtualRegister(PtrTy);
   auto SmallPtr = MIRBuilder.buildObjectPtrOffset(PtrAddReg, PtrReg, OffsetCst);
   auto SmallLoad = MIRBuilder.buildLoadInstr(LoadMI.getOpcode(), AnyExtTy,
@@ -4410,7 +4419,7 @@ LegalizerHelper::scalarizeVectorBooleanStore(GStore &StoreMI) {
     // We need to build an integer scalar of the vector bit pattern.
     // It's not legal for us to add padding when storing a vector.
     unsigned NumBits = MemTy.getSizeInBits();
-    LLT IntTy = LLT::scalar(NumBits);
+    LLT IntTy = LLT::integer(NumBits);
     auto CurrVal = MIRBuilder.buildConstant(IntTy, 0);
     LLT IdxTy = TLI.getVectorIdxLLT(MF.getDataLayout());
 
@@ -4594,12 +4603,28 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT LowerHintTy) {
     return Legalized;
   }
   case TargetOpcode::G_FNEG: {
-    auto [Res, SubByReg] = MI.getFirst2Regs();
-    LLT Ty = MRI.getType(Res);
+    auto [Res, ResTy, SubByReg, SubByRegTy] = MI.getFirst2RegLLTs();
+    LLT TyInt =
+        ResTy.changeElementType(LLT::integer(ResTy.getScalarSizeInBits()));
+    Register CastedSubByReg = SubByReg;
+
+    if (!SubByRegTy.getScalarType().isAnyScalar() &&
+        !SubByRegTy.getScalarType().isInteger()) {
+      auto BitcastDst = SubByRegTy.changeElementType(
+          LLT::integer(SubByRegTy.getScalarSizeInBits()));
+      CastedSubByReg = MIRBuilder.buildBitcast(BitcastDst, SubByReg).getReg(0);
+    }
 
     auto SignMask = MIRBuilder.buildConstant(
-        Ty, APInt::getSignMask(Ty.getScalarSizeInBits()));
-    MIRBuilder.buildXor(Res, SubByReg, SignMask);
+        TyInt, APInt::getSignMask(TyInt.getScalarSizeInBits()));
+
+    if (ResTy != TyInt) {
+      Register NewDst =
+          MIRBuilder.buildXor(TyInt, CastedSubByReg, SignMask).getReg(0);
+      MIRBuilder.buildBitcast(Res, NewDst);
+    } else
+    MIRBuilder.buildXor(Res, CastedSubByReg, SignMask);
+
     MI.eraseFromParent();
     return Legalized;
   }
@@ -5469,7 +5494,7 @@ LegalizerHelper::reduceLoadStoreWidth(GLoadStore &LdStMI, unsigned TypeIdx,
     return UnableToLegalize;
 
   LLT PtrTy = MRI.getType(AddrReg);
-  const LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
+  const LLT OffsetTy = LLT::integer(PtrTy.getSizeInBits());
 
   unsigned TotalSize = ValTy.getSizeInBits();
 
@@ -6164,7 +6189,7 @@ LegalizerHelper::narrowScalarShift(MachineInstr &MI, unsigned TypeIdx,
   // input. If that isn't small enough, the resulting pieces will be further
   // legalized.
   const unsigned NewBitSize = DstEltSize / 2;
-  const LLT HalfTy = LLT::scalar(NewBitSize);
+  const LLT HalfTy = DstTy.getScalarType().changeElementSize(NewBitSize);
   const LLT CondTy = LLT::scalar(1);
 
   if (auto VRegAndVal = getIConstantVRegValWithLookThrough(Amt, MRI)) {
@@ -8313,11 +8338,13 @@ LegalizerHelper::lowerU64ToF64BitFloatOps(MachineInstr &MI) {
 static LegalizerHelper::LegalizeResult
 loweri64tof16ITOFP(MachineInstr &MI, Register Dst, LLT DstTy, Register Src,
                    LLT SrcTy, MachineIRBuilder &MIRBuilder) {
+  auto DstFpTy =
+      SrcTy.changeElementType(LLT::floatIEEE(SrcTy.getScalarSizeInBits()));
   auto M1 = MI.getOpcode() == TargetOpcode::G_UITOFP
-                ? MIRBuilder.buildUITOFP(SrcTy, Src)
-                : MIRBuilder.buildSITOFP(SrcTy, Src);
-  LLT S32Ty = SrcTy.changeElementSize(32);
-  auto M2 = MIRBuilder.buildFPTrunc(S32Ty, M1);
+                ? MIRBuilder.buildUITOFP(DstFpTy, Src)
+                : MIRBuilder.buildSITOFP(DstFpTy, Src);
+  LLT F32Ty = DstFpTy.changeElementSize(32);
+  auto M2 = MIRBuilder.buildFPTrunc(F32Ty, M1);
   MIRBuilder.buildFPTrunc(Dst, M2);
   MI.eraseFromParent();
   return LegalizerHelper::Legalized;
@@ -8356,11 +8383,11 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerUITOFP(MachineInstr &MI) {
 LegalizerHelper::LegalizeResult LegalizerHelper::lowerSITOFP(MachineInstr &MI) {
   auto [Dst, DstTy, Src, SrcTy] = MI.getFirst2RegLLTs();
 
-  const LLT S64 = LLT::scalar(64);
-  const LLT S32 = LLT::scalar(32);
-  const LLT S1 = LLT::scalar(1);
+  const LLT I64 = LLT::integer(64);
+  const LLT I32 = LLT::integer(32);
+  const LLT I1 = LLT::integer(1);
 
-  if (SrcTy == S1) {
+  if (SrcTy == I1) {
     auto True = MIRBuilder.buildFConstant(DstTy, -1.0);
     auto False = MIRBuilder.buildFConstant(DstTy, 0.0);
     MIRBuilder.buildSelect(Dst, Src, True, False);
@@ -8371,26 +8398,26 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerSITOFP(MachineInstr &MI) {
   if (DstTy.getScalarSizeInBits() == 16 && SrcTy.getScalarSizeInBits() == 64)
     return loweri64tof16ITOFP(MI, Dst, DstTy, Src, SrcTy, MIRBuilder);
 
-  if (SrcTy != S64)
+  if (SrcTy != I64)
     return UnableToLegalize;
 
-  if (DstTy == S32) {
+  if (DstTy.getScalarSizeInBits() == 32) {
     // signed cl2f(long l) {
     //   long s = l >> 63;
     //   float r = cul2f((l + s) ^ s);
     //   return s ? -r : r;
     // }
     Register L = Src;
-    auto SignBit = MIRBuilder.buildConstant(S64, 63);
-    auto S = MIRBuilder.buildAShr(S64, L, SignBit);
+    auto SignBit = MIRBuilder.buildConstant(I64, 63);
+    auto S = MIRBuilder.buildAShr(I64, L, SignBit);
 
-    auto LPlusS = MIRBuilder.buildAdd(S64, L, S);
-    auto Xor = MIRBuilder.buildXor(S64, LPlusS, S);
-    auto R = MIRBuilder.buildUITOFP(S32, Xor);
+    auto LPlusS = MIRBuilder.buildAdd(I64, L, S);
+    auto Xor = MIRBuilder.buildXor(I64, LPlusS, S);
+    auto R = MIRBuilder.buildUITOFP(I32, Xor);
 
-    auto RNeg = MIRBuilder.buildFNeg(S32, R);
-    auto SignNotZero = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, S,
-                                            MIRBuilder.buildConstant(S64, 0));
+    auto RNeg = MIRBuilder.buildFNeg(I32, R);
+    auto SignNotZero = MIRBuilder.buildICmp(CmpInst::ICMP_NE, I1, S,
+                                            MIRBuilder.buildConstant(I64, 0));
     MIRBuilder.buildSelect(Dst, SignNotZero, RNeg, R);
     MI.eraseFromParent();
     return Legalized;
@@ -8538,14 +8565,13 @@ LegalizerHelper::lowerFPTOINT_SAT(MachineInstr &MI) {
   if (AreExactFloatBounds) {
     // Clamp Src by MinFloat from below. If Src is NaN the result is MinFloat.
     auto MaxC = MIRBuilder.buildFConstant(SrcTy, MinFloat);
-    auto MaxP = MIRBuilder.buildFCmp(CmpInst::FCMP_OGT,
-                                     SrcTy.changeElementSize(1), Src, MaxC);
+    auto MaxP =
+        MIRBuilder.buildFCmp(CmpInst::FCMP_OGT, LLT::integer(1), Src, MaxC);
     auto Max = MIRBuilder.buildSelect(SrcTy, MaxP, Src, MaxC);
     // Clamp by MaxFloat from above. NaN cannot occur.
     auto MinC = MIRBuilder.buildFConstant(SrcTy, MaxFloat);
-    auto MinP =
-        MIRBuilder.buildFCmp(CmpInst::FCMP_OLT, SrcTy.changeElementSize(1), Max,
-                             MinC, MachineInstr::FmNoNans);
+    auto MinP = MIRBuilder.buildFCmp(CmpInst::FCMP_OLT, LLT::integer(1), Max,
+                                     MinC, MachineInstr::FmNoNans);
     auto Min =
         MIRBuilder.buildSelect(SrcTy, MinP, Max, MinC, MachineInstr::FmNoNans);
     // Convert clamped value to integer. In the unsigned case we're done,
@@ -8558,8 +8584,8 @@ LegalizerHelper::lowerFPTOINT_SAT(MachineInstr &MI) {
 
     // Otherwise, select 0 if Src is NaN.
     auto FpToInt = MIRBuilder.buildFPTOSI(DstTy, Min);
-    auto IsZero = MIRBuilder.buildFCmp(CmpInst::FCMP_UNO,
-                                       DstTy.changeElementSize(1), Src, Src);
+    auto IsZero =
+        MIRBuilder.buildFCmp(CmpInst::FCMP_UNO, LLT::integer(1), Src, Src);
     MIRBuilder.buildSelect(Dst, IsZero, MIRBuilder.buildConstant(DstTy, 0),
                            FpToInt);
     MI.eraseFromParent();
@@ -8574,15 +8600,13 @@ LegalizerHelper::lowerFPTOINT_SAT(MachineInstr &MI) {
 
   // If Src ULT MinFloat, select MinInt. In particular, this also selects
   // MinInt if Src is NaN.
-  auto ULT =
-      MIRBuilder.buildFCmp(CmpInst::FCMP_ULT, SrcTy.changeElementSize(1), Src,
-                           MIRBuilder.buildFConstant(SrcTy, MinFloat));
+  auto ULT = MIRBuilder.buildFCmp(CmpInst::FCMP_ULT, LLT::integer(1), Src,
+                                  MIRBuilder.buildFConstant(SrcTy, MinFloat));
   auto Max = MIRBuilder.buildSelect(
       DstTy, ULT, MIRBuilder.buildConstant(DstTy, MinInt), FpToInt);
   // If Src OGT MaxFloat, select MaxInt.
-  auto OGT =
-      MIRBuilder.buildFCmp(CmpInst::FCMP_OGT, SrcTy.changeElementSize(1), Src,
-                           MIRBuilder.buildFConstant(SrcTy, MaxFloat));
+  auto OGT = MIRBuilder.buildFCmp(CmpInst::FCMP_OGT, LLT::integer(1), Src,
+                                  MIRBuilder.buildFConstant(SrcTy, MaxFloat));
 
   // In the unsigned case we are done, because we mapped NaN to MinInt, which
   // is already zero.
@@ -8596,8 +8620,8 @@ LegalizerHelper::lowerFPTOINT_SAT(MachineInstr &MI) {
   // Otherwise, select 0 if Src is NaN.
   auto Min = MIRBuilder.buildSelect(
       DstTy, OGT, MIRBuilder.buildConstant(DstTy, MaxInt), Max);
-  auto IsZero = MIRBuilder.buildFCmp(CmpInst::FCMP_UNO,
-                                     DstTy.changeElementSize(1), Src, Src);
+  auto IsZero =
+      MIRBuilder.buildFCmp(CmpInst::FCMP_UNO, LLT::integer(1), Src, Src);
   MIRBuilder.buildSelect(Dst, IsZero, MIRBuilder.buildConstant(DstTy, 0), Min);
   MI.eraseFromParent();
   return Legalized;
@@ -8789,7 +8813,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerMinMax(MachineInstr &MI) {
   auto [Dst, Src0, Src1] = MI.getFirst3Regs();
 
   const CmpInst::Predicate Pred = minMaxToCompare(MI.getOpcode());
-  LLT CmpType = MRI.getType(Dst).changeElementType(LLT::scalar(1));
+  LLT CmpType = MRI.getType(Dst).changeElementType(LLT::integer(1));
 
   auto Cmp = MIRBuilder.buildICmp(Pred, CmpType, Src0, Src1);
   MIRBuilder.buildSelect(Dst, Cmp, Src0, Src1);
@@ -8852,26 +8876,43 @@ LegalizerHelper::lowerFCopySign(MachineInstr &MI) {
   const int Src0Size = Src0Ty.getScalarSizeInBits();
   const int Src1Size = Src1Ty.getScalarSizeInBits();
 
-  auto SignBitMask = MIRBuilder.buildConstant(
-    Src0Ty, APInt::getSignMask(Src0Size));
+  LLT DstIntTy =
+      DstTy.changeElementType(LLT::integer(DstTy.getScalarSizeInBits()));
+  LLT Src0IntTy = Src0Ty.changeElementType(LLT::integer(Src0Size));
+  LLT Src1IntTy = Src1Ty.changeElementType(LLT::integer(Src1Size));
+
+  Register Src0Int = Src0;
+  Register Src1Int = Src1;
+
+  if (!(Src0Ty.getScalarType().isAnyScalar() ||
+        Src0Ty.getScalarType().isInteger()))
+    Src0Int = MIRBuilder.buildBitcast(Src0IntTy, Src0).getReg(0);
+
+  if (!(Src1Ty.getScalarType().isAnyScalar() ||
+        Src1Ty.getScalarType().isInteger()))
+    Src1Int = MIRBuilder.buildBitcast(Src1IntTy, Src1).getReg(0);
+
+  auto SignBitMask =
+      MIRBuilder.buildConstant(Src0IntTy, APInt::getSignMask(Src0Size));
 
   auto NotSignBitMask = MIRBuilder.buildConstant(
-    Src0Ty, APInt::getLowBitsSet(Src0Size, Src0Size - 1));
+      Src0IntTy, APInt::getLowBitsSet(Src0Size, Src0Size - 1));
 
-  Register And0 = MIRBuilder.buildAnd(Src0Ty, Src0, NotSignBitMask).getReg(0);
+  Register And0 =
+      MIRBuilder.buildAnd(Src0IntTy, Src0Int, NotSignBitMask).getReg(0);
   Register And1;
   if (Src0Ty == Src1Ty) {
-    And1 = MIRBuilder.buildAnd(Src1Ty, Src1, SignBitMask).getReg(0);
+    And1 = MIRBuilder.buildAnd(Src1IntTy, Src1Int, SignBitMask).getReg(0);
   } else if (Src0Size > Src1Size) {
-    auto ShiftAmt = MIRBuilder.buildConstant(Src0Ty, Src0Size - Src1Size);
-    auto Zext = MIRBuilder.buildZExt(Src0Ty, Src1);
-    auto Shift = MIRBuilder.buildShl(Src0Ty, Zext, ShiftAmt);
+    auto ShiftAmt = MIRBuilder.buildConstant(Src0IntTy, Src0Size - Src1Size);
+    auto Zext = MIRBuilder.buildZExt(Src0IntTy, Src1Int);
+    auto Shift = MIRBuilder.buildShl(Src0IntTy, Zext, ShiftAmt);
-    And1 = MIRBuilder.buildAnd(Src0Ty, Shift, SignBitMask).getReg(0);
+    And1 = MIRBuilder.buildAnd(Src0IntTy, Shift, SignBitMask).getReg(0);
   } else {
-    auto ShiftAmt = MIRBuilder.buildConstant(Src1Ty, Src1Size - Src0Size);
-    auto Shift = MIRBuilder.buildLShr(Src1Ty, Src1, ShiftAmt);
-    auto Trunc = MIRBuilder.buildTrunc(Src0Ty, Shift);
-    And1 = MIRBuilder.buildAnd(Src0Ty, Trunc, SignBitMask).getReg(0);
+    auto ShiftAmt = MIRBuilder.buildConstant(Src1IntTy, Src1Size - Src0Size);
+    auto Shift = MIRBuilder.buildLShr(Src1IntTy, Src1Int, ShiftAmt);
+    auto Trunc = MIRBuilder.buildTrunc(Src0IntTy, Shift);
+    And1 = MIRBuilder.buildAnd(Src0IntTy, Trunc, SignBitMask).getReg(0);
   }
 
   // Be careful about setting nsz/nnan/ninf on every instruction, since the
@@ -8882,7 +8923,12 @@ LegalizerHelper::lowerFCopySign(MachineInstr &MI) {
   // We masked the sign bit and the not-sign bit, so these are disjoint.
   Flags |= MachineInstr::Disjoint;
 
-  MIRBuilder.buildOr(Dst, And0, And1, Flags);
+  if (DstTy == DstIntTy)
+    MIRBuilder.buildOr(Dst, And0, And1, Flags);
+  else {
+    Register NewDst = MIRBuilder.buildOr(DstIntTy, And0, And1, Flags).getReg(0);
+    MIRBuilder.buildBitcast(Dst, NewDst);
+  }
 
   MI.eraseFromParent();
   return Legalized;
@@ -10202,12 +10248,17 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerSelect(MachineInstr &MI) {
   auto [DstReg, DstTy, MaskReg, MaskTy, Op1Reg, Op1Ty, Op2Reg, Op2Ty] =
       MI.getFirst4RegLLTs();
 
+  LLT Op1TyInt =
+      Op1Ty.changeElementType(LLT::integer(Op1Ty.getScalarSizeInBits()));
+
   bool IsEltPtr = DstTy.isPointerOrPointerVector();
   if (IsEltPtr) {
     LLT ScalarPtrTy = LLT::scalar(DstTy.getScalarSizeInBits());
     LLT NewTy = DstTy.changeElementType(ScalarPtrTy);
     Op1Reg = MIRBuilder.buildPtrToInt(NewTy, Op1Reg).getReg(0);
+    Op1Ty = MRI.getType(Op1Reg);
     Op2Reg = MIRBuilder.buildPtrToInt(NewTy, Op2Reg).getReg(0);
+    Op2Ty = MRI.getType(Op2Reg);
     DstTy = NewTy;
   }
 
@@ -10242,6 +10293,17 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerSelect(MachineInstr &MI) {
     return UnableToLegalize;
   }
 
+  if (!(Op1Ty.getScalarType().isAnyScalar() ||
+        Op1Ty.getScalarType().isInteger()))
+    Op1Reg = MIRBuilder.buildBitcast(Op1TyInt, Op1Reg).getReg(0);
+
+  if (!(Op2Ty.getScalarType().isAnyScalar() ||
+        Op2Ty.getScalarType().isInteger())) {
+    auto Op2TyInt =
+        Op2Ty.changeElementType(LLT::integer(Op2Ty.getScalarSizeInBits()));
+    Op2Reg = MIRBuilder.buildBitcast(Op2TyInt, Op2Reg).getReg(0);
+  }
+
   auto NotMask = MIRBuilder.buildNot(MaskTy, MaskReg);
   auto NewOp1 = MIRBuilder.buildAnd(MaskTy, Op1Reg, MaskReg);
   auto NewOp2 = MIRBuilder.buildAnd(MaskTy, Op2Reg, NotMask);
@@ -10249,7 +10311,12 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerSelect(MachineInstr &MI) {
     auto Or = MIRBuilder.buildOr(DstTy, NewOp1, NewOp2);
     MIRBuilder.buildIntToPtr(DstReg, Or);
   } else {
-    MIRBuilder.buildOr(DstReg, NewOp1, NewOp2);
+    if (DstTy == Op1TyInt)
+      MIRBuilder.buildOr(DstReg, NewOp1, NewOp2);
+    else {
+      auto Or = MIRBuilder.buildOr(Op1TyInt, NewOp1, NewOp2);
+      MIRBuilder.buildBitcast(DstReg, Or.getReg(0));
+    }
   }
   MI.eraseFromParent();
   return Legalized;
@@ -10365,16 +10432,36 @@ LegalizerHelper::lowerAbsDiffToMinMax(MachineInstr &MI) {
 }
 
 LegalizerHelper::LegalizeResult LegalizerHelper::lowerFAbs(MachineInstr &MI) {
-  Register SrcReg = MI.getOperand(1).getReg();
-  Register DstReg = MI.getOperand(0).getReg();
+  auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
+  LLT TyInt =
+      DstTy.changeElementType(LLT::integer(DstTy.getScalarSizeInBits()));
+  Register CastedSrc = SrcReg;
 
-  LLT Ty = MRI.getType(DstReg);
+  if (!(SrcTy.getScalarType().isAnyScalar() ||
+        SrcTy.getScalarType().isInteger())) {
+    auto SrcTyInt =
+        SrcTy.changeElementType(LLT::integer(SrcTy.getScalarSizeInBits()));
+    CastedSrc = MIRBuilder.buildBitcast(SrcTyInt, SrcReg).getReg(0);
+  }
+
+  if (MRI.getType(DstReg) != TyInt) {
+    // Reset sign bit
+    Register NewDst =
+        MIRBuilder
+            .buildAnd(TyInt, CastedSrc,
+                      MIRBuilder.buildConstant(
+                          TyInt, APInt::getSignedMaxValue(
+                                     DstTy.getScalarSizeInBits())))
+            .getReg(0);
 
-  // Reset sign bit
-  MIRBuilder.buildAnd(
-      DstReg, SrcReg,
-      MIRBuilder.buildConstant(
-          Ty, APInt::getSignedMaxValue(Ty.getScalarSizeInBits())));
+    MIRBuilder.buildBitcast(DstReg, NewDst);
+  } else
+    MIRBuilder.buildAnd(
+        DstReg, CastedSrc,
+        MIRBuilder.buildConstant(
+            TyInt, APInt::getSignedMaxValue(DstTy.getScalarSizeInBits())));
 
   MI.eraseFromParent();
   return Legalized;

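The lowerings above all follow one pattern: bitcast FP-typed values to the same-width integer type, do the bit manipulation there, and bitcast back. A minimal sketch for G_FNEG on an f32, using only builder calls that appear in this file (Src and Dst are placeholder registers):

    LLT F32 = LLT::floatIEEE(32);
    LLT I32 = LLT::integer(32);
    auto AsInt = MIRBuilder.buildBitcast(I32, Src);                    // f32 -> i32
    auto Mask = MIRBuilder.buildConstant(I32, APInt::getSignMask(32));
    auto Neg = MIRBuilder.buildXor(I32, AsInt, Mask);                  // flip sign bit
    MIRBuilder.buildBitcast(Dst, Neg);                                 // i32 -> f32
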
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index e379975672c2d..63c9ada9b059d 100644
--- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -587,7 +587,8 @@ MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
            Op.getLLTTy(*getMRI()).getSizeInBits())
     Opcode = TargetOpcode::G_TRUNC;
   else
-    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));
+    assert(Res.getLLTTy(*getMRI()).getSizeInBits() ==
+           Op.getLLTTy(*getMRI()).getSizeInBits());
 
   return buildInstr(Opcode, Res, Op);
 }
@@ -786,7 +787,7 @@ MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
   assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
          "Expected Src to match Dst elt ty");
   auto UndefVec = buildUndef(DstTy);
-  auto Zero = buildConstant(LLT::scalar(64), 0);
+  auto Zero = buildConstant(LLT::integer(64), 0);
   auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
   SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
   return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);

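The relaxed assertion matters because equal-sized LLTs are no longer necessarily equal (f32 vs. i32); buildExtOrTrunc only needs the widths to agree to choose between ext, trunc, and the equal-size case, where a plain COPY is emitted. Hypothetical use, with SrcF32 an f32-typed register:

    // Sizes match but types differ: this used to assert; it now emits a
    // COPY between the equally sized i32 and f32 values.
    auto R = MIRBuilder.buildExtOrTrunc(TargetOpcode::G_ANYEXT,
                                        LLT::integer(32), SrcF32);
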
diff --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
index 380b41a22f2b0..d019633369163 100644
--- a/llvm/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -790,15 +790,35 @@ llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
   return std::nullopt;
 }
 
+static GBuildVector *getBuildVectorLikeDef(Register Reg,
+                                           const MachineRegisterInfo &MRI) {
+  if (auto *BV = getOpcodeDef<GBuildVector>(Reg, MRI))
+    return BV;
+
+  auto *Bitcast = getOpcodeDef(TargetOpcode::G_BITCAST, Reg, MRI);
+  if (!Bitcast)
+    return nullptr;
+
+  auto [Dst, DstTy, Src, SrcTy] = Bitcast->getFirst2RegLLTs();
+  if (!SrcTy.isVector() || !DstTy.isVector())
+    return nullptr;
+  if (SrcTy.getElementCount() != DstTy.getElementCount())
+    return nullptr;
+  if (SrcTy.getScalarSizeInBits() != DstTy.getScalarSizeInBits())
+    return nullptr;
+
+  return getOpcodeDef<GBuildVector>(Src, MRI);
+}
+
 SmallVector<APInt>
 llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
                               const Register Op2,
                               const MachineRegisterInfo &MRI) {
-  auto *SrcVec2 = getOpcodeDef<GBuildVector>(Op2, MRI);
+  auto *SrcVec2 = getBuildVectorLikeDef(Op2, MRI);
   if (!SrcVec2)
     return SmallVector<APInt>();
 
-  auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1, MRI);
+  auto *SrcVec1 = getBuildVectorLikeDef(Op1, MRI);
   if (!SrcVec1)
     return SmallVector<APInt>();
 

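Why looking through these bitcasts is sound: constant folding only consumes per-lane bit patterns, and a bitcast that preserves both the lane count and the lane width (e.g. <2 x p0> from <2 x s64>) keeps those patterns lane-for-lane. Hypothetical call, with Op1/Op2 registers where one is such a bitcast of a G_BUILD_VECTOR:

    // Folds exactly as if the underlying G_BUILD_VECTOR were used directly.
    SmallVector<APInt> Folded =
        ConstantFoldVectorBinop(TargetOpcode::G_ADD, Op1, Op2, MRI);
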
diff --git a/llvm/lib/CodeGen/LowLevelTypeUtils.cpp b/llvm/lib/CodeGen/LowLevelTypeUtils.cpp
index 936c9fbb2fff0..a213c81eae98e 100644
--- a/llvm/lib/CodeGen/LowLevelTypeUtils.cpp
+++ b/llvm/lib/CodeGen/LowLevelTypeUtils.cpp
@@ -18,7 +18,7 @@
 using namespace llvm;
 
 LLT llvm::getLLTForType(Type &Ty, const DataLayout &DL) {
-  if (auto VTy = dyn_cast<VectorType>(&Ty)) {
+  if (auto *VTy = dyn_cast<VectorType>(&Ty)) {
     auto EC = VTy->getElementCount();
     LLT ScalarTy = getLLTForType(*VTy->getElementType(), DL);
     if (EC.isScalar())
@@ -26,7 +26,7 @@ LLT llvm::getLLTForType(Type &Ty, const DataLayout &DL) {
     return LLT::vector(EC, ScalarTy);
   }
 
-  if (auto PTy = dyn_cast<PointerType>(&Ty)) {
+  if (auto *PTy = dyn_cast<PointerType>(&Ty)) {
     unsigned AddrSpace = PTy->getAddressSpace();
     return LLT::pointer(AddrSpace, DL.getPointerSizeInBits(AddrSpace));
   }
@@ -36,6 +36,35 @@ LLT llvm::getLLTForType(Type &Ty, const DataLayout &DL) {
     // concerned.
     auto SizeInBits = DL.getTypeSizeInBits(&Ty);
     assert(SizeInBits != 0 && "invalid zero-sized type");
+
+    // Return a plain scalar when extended LLT info is disabled.
+    if (!LLT::getUseExtended())
+      return LLT::scalar(SizeInBits);
+
+    // Otherwise choose the more precise LLT variant.
+    if (Ty.isFloatingPointTy())
+      switch (Ty.getTypeID()) {
+      default:
+        llvm_unreachable("Unhandled LLVM IR floating point type");
+      case Type::HalfTyID:
+        return LLT::float16();
+      case Type::BFloatTyID:
+        return LLT::bfloat16();
+      case Type::FloatTyID:
+        return LLT::float32();
+      case Type::DoubleTyID:
+        return LLT::float64();
+      case Type::X86_FP80TyID:
+        return LLT::x86fp80();
+      case Type::FP128TyID:
+        return LLT::float128();
+      case Type::PPC_FP128TyID:
+        return LLT::ppcf128();
+      }
+
+    if (Ty.isIntegerTy())
+      return LLT::integer(SizeInBits);
+
     return LLT::scalar(SizeInBits);
   }
 
@@ -46,12 +75,24 @@ LLT llvm::getLLTForType(Type &Ty, const DataLayout &DL) {
 }
 
 MVT llvm::getMVTForLLT(LLT Ty) {
-  if (!Ty.isVector())
-    return MVT::getIntegerVT(Ty.getSizeInBits());
+  if (Ty.isVector())
+    return MVT::getVectorVT(getMVTForLLT(Ty.getElementType()),
+                            Ty.getElementCount());
+
+  if (Ty.isFloat()) {
+    if (Ty.isBFloat16())
+      return MVT::bf16;
+
+    if (Ty.isX86FP80())
+      return MVT::f80;
+
+    if (Ty.isPPCF128())
+      return MVT::ppcf128;
 
-  return MVT::getVectorVT(
-      MVT::getIntegerVT(Ty.getElementType().getSizeInBits()),
-      Ty.getElementCount());
+    return MVT::getFloatingPointVT(Ty.getSizeInBits());
+  }
+
+  return MVT::getIntegerVT(Ty.getSizeInBits());
 }
 
 EVT llvm::getApproximateEVTForLLT(LLT Ty, LLVMContext &Ctx) {
@@ -63,25 +104,28 @@ EVT llvm::getApproximateEVTForLLT(LLT Ty, LLVMContext &Ctx) {
   return EVT::getIntegerVT(Ctx, Ty.getSizeInBits());
 }
 
-LLT llvm::getLLTForMVT(MVT Ty) {
-  if (!Ty.isVector())
-    return LLT::scalar(Ty.getSizeInBits());
-
-  return LLT::scalarOrVector(Ty.getVectorElementCount(),
-                             Ty.getVectorElementType().getSizeInBits());
-}
+LLT llvm::getLLTForMVT(MVT VT) { return LLT(VT); }
 
 const llvm::fltSemantics &llvm::getFltSemanticForLLT(LLT Ty) {
-  assert(Ty.isScalar() && "Expected a scalar type.");
-  switch (Ty.getSizeInBits()) {
-  case 16:
-    return APFloat::IEEEhalf();
-  case 32:
-    return APFloat::IEEEsingle();
-  case 64:
-    return APFloat::IEEEdouble();
-  case 128:
-    return APFloat::IEEEquad();
+  assert((Ty.isAnyScalar() || Ty.isFloat()) &&
+         "Expected an any-scalar or float type.");
+
+  // An "any scalar" type is always mapped to the IEEE format of its width.
+  // FIXME: Remove this handling
+  if (Ty.isAnyScalar()) {
+    switch (Ty.getSizeInBits()) {
+    default:
+      llvm_unreachable("Invalid FP type size.");
+    case 16:
+      return APFloat::IEEEhalf();
+    case 32:
+      return APFloat::IEEEsingle();
+    case 64:
+      return APFloat::IEEEdouble();
+    case 128:
+      return APFloat::IEEEquad();
+    }
   }
-  llvm_unreachable("Invalid FP type size.");
+
+  return APFloat::EnumToSemantics(Ty.getFpSemantics());
 }

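A short sketch of the resulting mappings once extended LLTs are enabled (APIs as introduced or modified in this patch):

    LLT::setUseExtended(true);
    LLVMContext Ctx;
    DataLayout DL("");
    LLT H = getLLTForType(*Type::getHalfTy(Ctx), DL);    // LLT::float16()
    LLT B = getLLTForType(*Type::getBFloatTy(Ctx), DL);  // LLT::bfloat16()
    LLT I = getLLTForType(*Type::getInt32Ty(Ctx), DL);   // LLT::integer(32)
    assert(getMVTForLLT(B) == MVT::bf16);
    assert(&getFltSemanticForLLT(B) == &APFloat::BFloat());
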
diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
index fdd8f490eb52f..84b806ae81f39 100644
--- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -2115,26 +2115,35 @@ static bool verifyAddrSpace(uint64_t AddrSpace) {
 }
 
 bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty) {
-  if (Token.range().front() == 's' || Token.range().front() == 'p') {
-    StringRef SizeStr = Token.range().drop_front();
-    if (SizeStr.size() == 0 || !llvm::all_of(SizeStr, isdigit))
-      return error("expected integers after 's'/'p' type character");
-  }
-
-  if (Token.range().front() == 's') {
-    auto ScalarSize = APSInt(Token.range().drop_front()).getZExtValue();
-    if (ScalarSize) {
-      if (!verifyScalarSize(ScalarSize))
-        return error("invalid size for scalar type");
-      Ty = LLT::scalar(ScalarSize);
-    } else {
+  StringRef TypeDigits = Token.range();
+  if (TypeDigits.consume_front("s") || TypeDigits.consume_front("i") ||
+      TypeDigits.consume_front("f") || TypeDigits.consume_front("p") ||
+      TypeDigits.consume_front("bf")) {
+    if (TypeDigits.empty() || !llvm::all_of(TypeDigits, isdigit))
+      return error(
+          "expected integers after 's'/'i'/'f'/'bf'/'p' type identifier");
+  }
+
+  bool Scalar = Token.range().starts_with("s");
+  if (Scalar || Token.range().starts_with("i")) {
+    auto ScalarSize = APSInt(TypeDigits).getZExtValue();
+    if (!ScalarSize) {
       Ty = LLT::token();
+      lex();
+      return false;
     }
+
+    if (!verifyScalarSize(ScalarSize))
+      return error("invalid size for scalar type");
+
+    Ty = Scalar ? LLT::scalar(ScalarSize) : LLT::integer(ScalarSize);
     lex();
     return false;
-  } else if (Token.range().front() == 'p') {
+  }
+
+  if (Token.range().starts_with("p")) {
     const DataLayout &DL = MF.getDataLayout();
-    uint64_t AS = APSInt(Token.range().drop_front()).getZExtValue();
+    uint64_t AS = APSInt(TypeDigits).getZExtValue();
     if (!verifyAddrSpace(AS))
       return error("invalid address space number");
 
@@ -2143,10 +2152,25 @@ bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty) {
     return false;
   }
 
+  if (Token.range().starts_with("f") || Token.range().starts_with("bf")) {
+    auto ScalarSize = APSInt(TypeDigits).getZExtValue();
+    if (!ScalarSize || !verifyScalarSize(ScalarSize))
+      return error("invalid size for scalar type");
+
+    if (Token.range().starts_with("bf") && ScalarSize != 16)
+      return error("invalid size for bfloat");
+
+    Ty = Token.range().starts_with("bf") ? LLT::bfloat16()
+                                         : LLT::floatIEEE(ScalarSize);
+    lex();
+    return false;
+  }
+
   // Now we're looking for a vector.
   if (Token.isNot(MIToken::less))
-    return error(Loc, "expected sN, pA, <M x sN>, <M x pA>, <vscale x M x sN>, "
-                      "or <vscale x M x pA> for GlobalISel type");
+    return error(Loc, "expected tN, pA, <M x tN>, <M x pA>, <vscale x M x tN>, "
+                      "or <vscale x M x pA> for GlobalISel type, "
+                      "where t = {'s', 'i', 'f', 'bf'}");
   lex();
 
   bool HasVScale =
@@ -2154,15 +2178,17 @@ bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty) {
   if (HasVScale) {
     lex();
     if (Token.isNot(MIToken::Identifier) || Token.stringValue() != "x")
-      return error("expected <vscale x M x sN> or <vscale x M x pA>");
+      return error(
+          "expected <vscale x M x tN>, where t = {'s', 'i', 'f', 'bf', 'p'}");
     lex();
   }
 
   auto GetError = [this, &HasVScale, Loc]() {
     if (HasVScale)
-      return error(
-          Loc, "expected <vscale x M x sN> or <vscale M x pA> for vector type");
-    return error(Loc, "expected <M x sN> or <M x pA> for vector type");
+      return error(Loc, "expected <vscale x M x tN> for vector type, where t = "
+                        "{'s', 'i', 'f', 'bf', 'p'}");
+    return error(Loc, "expected <M x tN> for vector type, where t = {'s', 'i', "
+                      "'f', 'bf', 'p'}");
   };
 
   if (Token.isNot(MIToken::IntegerLiteral))
@@ -2177,25 +2203,40 @@ bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty) {
     return GetError();
   lex();
 
-  if (Token.range().front() != 's' && Token.range().front() != 'p')
+  StringRef VectorTyDigits = Token.range();
+  if (!VectorTyDigits.consume_front("s") &&
+      !VectorTyDigits.consume_front("i") &&
+      !VectorTyDigits.consume_front("f") &&
+      !VectorTyDigits.consume_front("p") && !VectorTyDigits.consume_front("bf"))
     return GetError();
 
-  StringRef SizeStr = Token.range().drop_front();
-  if (SizeStr.size() == 0 || !llvm::all_of(SizeStr, isdigit))
-    return error("expected integers after 's'/'p' type character");
+  if (VectorTyDigits.empty() || !llvm::all_of(VectorTyDigits, isdigit))
+    return error(
+        "expected integers after 's'/'i'/'f'/'bf'/'p' type identifier");
 
-  if (Token.range().front() == 's') {
-    auto ScalarSize = APSInt(Token.range().drop_front()).getZExtValue();
+  Scalar = Token.range().starts_with("s");
+  if (Scalar || Token.range().starts_with("i")) {
+    auto ScalarSize = APSInt(VectorTyDigits).getZExtValue();
     if (!verifyScalarSize(ScalarSize))
       return error("invalid size for scalar element in vector");
-    Ty = LLT::scalar(ScalarSize);
-  } else if (Token.range().front() == 'p') {
+    Ty = Scalar ? LLT::scalar(ScalarSize) : LLT::integer(ScalarSize);
+  } else if (Token.range().starts_with("p")) {
     const DataLayout &DL = MF.getDataLayout();
-    uint64_t AS = APSInt(Token.range().drop_front()).getZExtValue();
+    uint64_t AS = APSInt(VectorTyDigits).getZExtValue();
     if (!verifyAddrSpace(AS))
       return error("invalid address space number");
 
     Ty = LLT::pointer(AS, DL.getPointerSizeInBits(AS));
+  } else if (Token.range().starts_with("f")) {
+    auto ScalarSize = APSInt(VectorTyDigits).getZExtValue();
+    if (!verifyScalarSize(ScalarSize))
+      return error("invalid size for float element in vector");
+    Ty = LLT::floatIEEE(ScalarSize);
+  } else if (Token.range().starts_with("bf")) {
+    auto ScalarSize = APSInt(VectorTyDigits).getZExtValue();
+    if (!verifyScalarSize(ScalarSize))
+      return error("invalid size for bfloat element in vector");
+    Ty = LLT::bfloat16();
   } else {
     return GetError();
   }
@@ -2212,14 +2253,15 @@ bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty) {
 
 bool MIParser::parseTypedImmediateOperand(MachineOperand &Dest) {
   assert(Token.is(MIToken::Identifier));
-  StringRef TypeStr = Token.range();
-  if (TypeStr.front() != 'i' && TypeStr.front() != 's' &&
-      TypeStr.front() != 'p')
+  StringRef TypeDigits = Token.range();
+  if (!TypeDigits.consume_front("i") && !TypeDigits.consume_front("s") &&
+      !TypeDigits.consume_front("p") && !TypeDigits.consume_front("f") &&
+      !TypeDigits.consume_front("bf"))
+    return error("a typed immediate operand should start with one of 'i', "
+                 "'s', 'f', 'bf', or 'p'");
+  if (TypeDigits.empty() || !llvm::all_of(TypeDigits, isdigit))
     return error(
-        "a typed immediate operand should start with one of 'i', 's', or 'p'");
-  StringRef SizeStr = Token.range().drop_front();
-  if (SizeStr.size() == 0 || !llvm::all_of(SizeStr, isdigit))
-    return error("expected integers after 'i'/'s'/'p' type character");
+        "expected integers after 'i'/'s'/'f'/'bf'/'p' type identifier");
 
   auto Loc = Token.location();
   lex();

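Summarizing the grammar the parser now accepts (a sketch; note that 'bf' is matched alongside the single-letter prefixes, and a scalar bf only admits size 16):

    // s32                  "any" 32-bit scalar (legacy spelling)
    // i32                  32-bit integer scalar
    // f16 / f32 / f64      IEEE floats
    // bf16                 bfloat16
    // p0                   pointer in address space 0
    // <4 x f32>            fixed-width float vector
    // <vscale x 2 x i64>   scalable integer vector
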
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index 980ff2c6717a6..c3be06384e317 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1336,7 +1336,16 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
     if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
       report("bitcast sizes must match", MI);
 
-    if (SrcTy == DstTy)
+    bool SameType = SrcTy.getKind() == DstTy.getKind();
+    if (SameType && SrcTy.isPointerOrPointerVector())
+      SameType &= SrcTy.getAddressSpace() == DstTy.getAddressSpace();
+
+    SameType &= SrcTy.getScalarSizeInBits() == DstTy.getScalarSizeInBits();
+
+    if (SameType && SrcTy.isVector())
+      SameType &= SrcTy.getElementCount() == DstTy.getElementCount();
+
+    if (SameType)
       report("bitcast must change the type", MI);
 
     break;

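In effect the check asks whether anything observable changes: kind, address space, element width, or element count. A sketch of the same predicate as a standalone helper (hypothetical name):

    // True iff a G_BITCAST between A and B would change nothing observable,
    // which is exactly what the verifier now rejects.
    auto IsSameForBitcast = [](LLT A, LLT B) {
      if (A.getKind() != B.getKind())
        return false;
      if (A.isPointerOrPointerVector() &&
          A.getAddressSpace() != B.getAddressSpace())
        return false;
      if (A.getScalarSizeInBits() != B.getScalarSizeInBits())
        return false;
      return !A.isVector() || A.getElementCount() == B.getElementCount();
    };
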
diff --git a/llvm/lib/CodeGenTypes/LowLevelType.cpp b/llvm/lib/CodeGenTypes/LowLevelType.cpp
index 92b7fad3a0e24..484329e918903 100644
--- a/llvm/lib/CodeGenTypes/LowLevelType.cpp
+++ b/llvm/lib/CodeGenTypes/LowLevelType.cpp
@@ -16,36 +16,91 @@
 #include "llvm/Support/raw_ostream.h"
 using namespace llvm;
 
+bool LLT::ExtendedLLT = false;
+
+static LLT::FpSemantics getFpSemanticsForMVT(MVT VT) {
+  switch (VT.getScalarType().SimpleTy) {
+  default:
+    llvm_unreachable("Unknown FP format");
+  case MVT::f16:
+    return LLT::FpSemantics::S_IEEEhalf;
+  case MVT::bf16:
+    return LLT::FpSemantics::S_BFloat;
+  case MVT::f32:
+    return LLT::FpSemantics::S_IEEEsingle;
+  case MVT::f64:
+    return LLT::FpSemantics::S_IEEEdouble;
+  case MVT::f80:
+    return LLT::FpSemantics::S_x87DoubleExtended;
+  case MVT::f128:
+    return LLT::FpSemantics::S_IEEEquad;
+  case MVT::ppcf128:
+    return LLT::FpSemantics::S_PPCDoubleDouble;
+  }
+}
+
 LLT::LLT(MVT VT) {
-  if (VT.isVector()) {
-    bool asVector = VT.getVectorMinNumElements() > 1 || VT.isScalableVector();
-    init(/*IsPointer=*/false, asVector, /*IsScalar=*/!asVector,
-         VT.getVectorElementCount(), VT.getVectorElementType().getSizeInBits(),
-         /*AddressSpace=*/0);
+  if (!ExtendedLLT) {
+    if (VT.isVector()) {
+      bool AsVector = VT.getVectorMinNumElements() > 1 || VT.isScalableVector();
+      Kind Info = AsVector ? Kind::VECTOR_ANY : Kind::ANY_SCALAR;
+      init(Info, VT.getVectorElementCount(),
+           VT.getVectorElementType().getSizeInBits());
+    } else if (VT.isValid() && !VT.isScalableTargetExtVT()) {
+      init(Kind::ANY_SCALAR, ElementCount::getFixed(0), VT.getSizeInBits());
+    } else {
+      this->Info = Kind::INVALID;
+      this->RawData = 0;
+    }
+    return;
+  }
+
+  bool IsFloatingPoint = VT.isFloatingPoint();
+  bool AsVector = VT.isVector() &&
+                  (VT.getVectorMinNumElements() > 1 || VT.isScalableVector());
+
+  if (AsVector) {
+    if (IsFloatingPoint)
+      init(LLT::Kind::VECTOR_FLOAT, VT.getVectorElementCount(),
+           VT.getVectorElementType().getSizeInBits(), getFpSemanticsForMVT(VT));
+    else
+      init(LLT::Kind::VECTOR_INTEGER, VT.getVectorElementCount(),
+           VT.getVectorElementType().getSizeInBits());
   } else if (VT.isValid() && !VT.isScalableTargetExtVT()) {
     // Aggregates are no different from real scalars as far as GlobalISel is
     // concerned.
-    init(/*IsPointer=*/false, /*IsVector=*/false, /*IsScalar=*/true,
-         ElementCount::getFixed(0), VT.getSizeInBits(), /*AddressSpace=*/0);
+    if (IsFloatingPoint)
+      init(LLT::Kind::FLOAT, ElementCount::getFixed(0), VT.getSizeInBits(),
+           getFpSemanticsForMVT(VT));
+    else
+      init(LLT::Kind::INTEGER, ElementCount::getFixed(0), VT.getSizeInBits());
   } else {
-    IsScalar = false;
-    IsPointer = false;
-    IsVector = false;
-    RawData = 0;
+    this->Info = Kind::INVALID;
+    this->RawData = 0;
   }
 }
 
 void LLT::print(raw_ostream &OS) const {
   if (isVector()) {
     OS << "<";
     OS << getElementCount() << " x " << getElementType() << ">";
-  } else if (isPointer())
+  } else if (isPointer()) {
     OS << "p" << getAddressSpace();
-  else if (isValid()) {
+  } else if (isBFloat16()) {
+    OS << "bf16";
+  } else if (isPPCF128()) {
+    OS << "ppcf128";
+  } else if (isFloatIEEE()) {
+    OS << "f" << getScalarSizeInBits();
+  } else if (isInteger()) {
+    OS << "i" << getScalarSizeInBits();
+  } else if (isValid()) {
     assert(isScalar() && "unexpected type");
     OS << "s" << getScalarSizeInBits();
-  } else
+  } else {
     OS << "LLT_invalid";
+  }
 }
 
 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

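And a sketch of the MVT round trip plus the new printed spellings (accessors as used above):

    LLT::setUseExtended(true);
    LLT BF = LLT(MVT::bf16);
    assert(BF.isBFloat16() && getMVTForLLT(BF) == MVT::bf16);
    LLT V = LLT(MVT::v4f32);
    assert(V.isVector() && V.getElementType().isFloatIEEE());
    // print() now emits "bf16", "ppcf128", "fN", "iN", "sN", or "pN".
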
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp
index 77be58c533671..1a158b335321d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp
@@ -463,8 +463,9 @@ bool AMDGPUCombinerHelper::matchCombineFmulWithSelectToFldexp(
   LLT DestTy = MRI.getType(Dst);
   LLT ScalarDestTy = DestTy.getScalarType();
 
-  if ((ScalarDestTy != LLT::float64() && ScalarDestTy != LLT::float32() &&
-       ScalarDestTy != LLT::float16()) ||
+  // TODO: Expected float type in ScalarDestTy
+  if ((ScalarDestTy != LLT::scalar(64) && ScalarDestTy != LLT::scalar(32) &&
+       ScalarDestTy != LLT::scalar(16)) ||
       !MRI.hasOneNonDBGUse(Sel.getOperand(0).getReg()))
     return false;
 
@@ -485,7 +486,8 @@ bool AMDGPUCombinerHelper::matchCombineFmulWithSelectToFldexp(
     return false;
 
   // For f32, only non-inline constants should be transformed.
-  if (ScalarDestTy == LLT::float32() && TII.isInlineConstant(*SelectTrueVal) &&
+  // TODO: Expected float32
+  if (ScalarDestTy == LLT::scalar(32) && TII.isInlineConstant(*SelectTrueVal) &&
       TII.isInlineConstant(*SelectFalseVal))
     return false;
 

diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index c78ef16b00983..4e956db103188 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -297,9 +297,9 @@ constexpr LLT S1 = LLT::scalar(1);
 constexpr LLT S8 = LLT::scalar(8);
 constexpr LLT S16 = LLT::scalar(16);
 constexpr LLT S32 = LLT::scalar(32);
-constexpr LLT F32 = LLT::float32();
+constexpr LLT F32 = LLT::scalar(32); // TODO: Expected float32
 constexpr LLT S64 = LLT::scalar(64);
-constexpr LLT F64 = LLT::float64();
+constexpr LLT F64 = LLT::scalar(64); // TODO: Expected float64
 constexpr LLT S96 = LLT::scalar(96);
 constexpr LLT S128 = LLT::scalar(128);
 constexpr LLT S160 = LLT::scalar(160);
@@ -319,7 +319,8 @@ constexpr LLT V10S16 = LLT::fixed_vector(10, 16);
 constexpr LLT V12S16 = LLT::fixed_vector(12, 16);
 constexpr LLT V16S16 = LLT::fixed_vector(16, 16);
 
-constexpr LLT V2F16 = LLT::fixed_vector(2, LLT::float16());
+// TODO: Expected LLT::fixed_vector(2, LLT::float16())
+constexpr LLT V2F16 = LLT::fixed_vector(2, LLT::scalar(16));
 constexpr LLT V2BF16 = V2F16; // FIXME
 
 constexpr LLT V2S32 = LLT::fixed_vector(2, 32);
@@ -3480,11 +3481,12 @@ bool AMDGPULegalizerInfo::legalizeFMad(
   const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
 
   // TODO: Always legal with future ftz flag.
+  // TODO: Type is expected to be LLT::float32()/LLT::float16()
   // FIXME: Do we need just output?
-  if (Ty == LLT::float32() &&
+  if (Ty == LLT::scalar(32) &&
       MFI->getMode().FP32Denormals == DenormalMode::getPreserveSign())
     return true;
-  if (Ty == LLT::float16() &&
+  if (Ty == LLT::scalar(16) &&
       MFI->getMode().FP64FP16Denormals == DenormalMode::getPreserveSign())
     return true;
 
@@ -4216,8 +4218,8 @@ bool AMDGPULegalizerInfo::legalizeFPow(MachineInstr &MI,
   Register Src1 = MI.getOperand(2).getReg();
   unsigned Flags = MI.getFlags();
   LLT Ty = B.getMRI()->getType(Dst);
-  const LLT F16 = LLT::float16();
-  const LLT F32 = LLT::float32();
+  const LLT F16 = LLT::scalar(16); // TODO: Expected LLT::float16()
+  const LLT F32 = LLT::scalar(32); // TODO: Expected LLT::float32()
 
   if (Ty == F32) {
     auto Log = B.buildFLog2(F32, Src0, Flags);
@@ -4260,7 +4262,7 @@ bool AMDGPULegalizerInfo::legalizeFFloor(MachineInstr &MI,
                                          MachineIRBuilder &B) const {
 
   const LLT S1 = LLT::scalar(1);
-  const LLT F64 = LLT::float64();
+  const LLT F64 = LLT::scalar(64); // TODO: Expected float64
   Register Dst = MI.getOperand(0).getReg();
   Register OrigSrc = MI.getOperand(1).getReg();
   unsigned Flags = MI.getFlags();

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-subvector.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-subvector.ll
index 149bf72b053ef..862e0f6022395 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-subvector.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-subvector.ll
@@ -334,10 +334,14 @@ define i32 @extract_v4iptr_vector_insert_const_fixed_legal(<4 x ptr> %a, <4 x pt
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
-  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x p0>) = G_CONCAT_VECTORS [[COPY]](<2 x s64>), [[COPY1]](<2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[COPY]](<2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST1:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[COPY1]](<2 x s64>)
+  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x p0>) = G_CONCAT_VECTORS [[BITCAST]](<2 x p0>), [[BITCAST1]](<2 x p0>)
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<2 x s64>) = COPY $q2
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<2 x s64>) = COPY $q3
-  ; CHECK-NEXT:   [[CONCAT_VECTORS1:%[0-9]+]]:_(<4 x p0>) = G_CONCAT_VECTORS [[COPY2]](<2 x s64>), [[COPY3]](<2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST2:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[COPY2]](<2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST3:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[COPY3]](<2 x s64>)
+  ; CHECK-NEXT:   [[CONCAT_VECTORS1:%[0-9]+]]:_(<4 x p0>) = G_CONCAT_VECTORS [[BITCAST2]](<2 x p0>), [[BITCAST3]](<2 x p0>)
   ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x0
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(p0) = COPY $x1
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/pr168872.ll b/llvm/test/CodeGen/AArch64/GlobalISel/pr168872.ll
index 4405c06ff0e23..c7ff34fd35935 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/pr168872.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/pr168872.ll
@@ -12,16 +12,14 @@ define <4 x ptr> @pr168872(<4 x ptr> %ptrs) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
-  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
-  ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<2 x p0>) = G_BUILD_VECTOR [[C]](p0), [[C]](p0)
   ; CHECK-NEXT:   [[BITCAST:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[COPY]](<2 x s64>)
   ; CHECK-NEXT:   [[BITCAST1:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[COPY1]](<2 x s64>)
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
+  ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<2 x p0>) = G_BUILD_VECTOR [[C]](p0), [[C]](p0)
   ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(ugt), [[BITCAST]](<2 x p0>), [[BUILD_VECTOR]]
   ; CHECK-NEXT:   [[ICMP1:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(ugt), [[BITCAST1]](<2 x p0>), [[BUILD_VECTOR]]
-  ; CHECK-NEXT:   [[BITCAST2:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[COPY]](<2 x s64>)
-  ; CHECK-NEXT:   [[BITCAST3:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[COPY1]](<2 x s64>)
-  ; CHECK-NEXT:   [[PTRTOINT:%[0-9]+]]:_(<2 x s64>) = G_PTRTOINT [[BITCAST2]](<2 x p0>)
-  ; CHECK-NEXT:   [[PTRTOINT1:%[0-9]+]]:_(<2 x s64>) = G_PTRTOINT [[BITCAST3]](<2 x p0>)
+  ; CHECK-NEXT:   [[PTRTOINT:%[0-9]+]]:_(<2 x s64>) = G_PTRTOINT [[BITCAST]](<2 x p0>)
+  ; CHECK-NEXT:   [[PTRTOINT1:%[0-9]+]]:_(<2 x s64>) = G_PTRTOINT [[BITCAST1]](<2 x p0>)
   ; CHECK-NEXT:   [[PTRTOINT2:%[0-9]+]]:_(<2 x s64>) = G_PTRTOINT [[BUILD_VECTOR]](<2 x p0>)
   ; CHECK-NEXT:   [[PTRTOINT3:%[0-9]+]]:_(<2 x s64>) = G_PTRTOINT [[BUILD_VECTOR]](<2 x p0>)
   ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
@@ -36,10 +34,10 @@ define <4 x ptr> @pr168872(<4 x ptr> %ptrs) {
   ; CHECK-NEXT:   [[OR1:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND1]], [[AND3]]
   ; CHECK-NEXT:   [[INTTOPTR:%[0-9]+]]:_(<2 x p0>) = G_INTTOPTR [[OR]](<2 x s64>)
   ; CHECK-NEXT:   [[INTTOPTR1:%[0-9]+]]:_(<2 x p0>) = G_INTTOPTR [[OR1]](<2 x s64>)
-  ; CHECK-NEXT:   [[BITCAST4:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[INTTOPTR]](<2 x p0>)
-  ; CHECK-NEXT:   [[BITCAST5:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[INTTOPTR1]](<2 x p0>)
-  ; CHECK-NEXT:   $q0 = COPY [[BITCAST4]](<2 x s64>)
-  ; CHECK-NEXT:   $q1 = COPY [[BITCAST5]](<2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST2:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[INTTOPTR]](<2 x p0>)
+  ; CHECK-NEXT:   [[BITCAST3:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[INTTOPTR1]](<2 x p0>)
+  ; CHECK-NEXT:   $q0 = COPY [[BITCAST2]](<2 x s64>)
+  ; CHECK-NEXT:   $q1 = COPY [[BITCAST3]](<2 x s64>)
   ; CHECK-NEXT:   RET_ReallyLR implicit $q0, implicit $q1
 entry:
   %cmp = icmp ugt <4 x ptr> %ptrs, zeroinitializer

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/translate-gep.ll b/llvm/test/CodeGen/AArch64/GlobalISel/translate-gep.ll
index 5b2d6607f6297..dca947100b31e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/translate-gep.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/translate-gep.ll
@@ -153,7 +153,9 @@ define <4 x ptr> @vector_gep_v4i32(<4 x ptr> %b, <4 x i32> %off) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
-  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x p0>) = G_CONCAT_VECTORS [[COPY]](<2 x s64>), [[COPY1]](<2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[COPY]](<2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST1:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[COPY1]](<2 x s64>)
+  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x p0>) = G_CONCAT_VECTORS [[BITCAST]](<2 x p0>), [[BITCAST1]](<2 x p0>)
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $q2
   ; CHECK-NEXT:   [[SEXT:%[0-9]+]]:_(<4 x s64>) = G_SEXT [[COPY2]](<4 x s32>)
   ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(<4 x p0>) = G_PTR_ADD [[CONCAT_VECTORS]], [[SEXT]](<4 x s64>)

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/translate-sve-formal-argument-multiple.ll b/llvm/test/CodeGen/AArch64/GlobalISel/translate-sve-formal-argument-multiple.ll
index 28d53dab9d99f..06616510119e0 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/translate-sve-formal-argument-multiple.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/translate-sve-formal-argument-multiple.ll
@@ -20,7 +20,11 @@ define void @formal_argument_mix_sve(
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z5
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z6
   ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z7
-  ; CHECK-NEXT:   [[CONCAT_VECTORS1:%[0-9]+]]:_(<vscale x 8 x p0>) = G_CONCAT_VECTORS [[COPY4]](<vscale x 2 x s64>), [[COPY5]](<vscale x 2 x s64>), [[COPY6]](<vscale x 2 x s64>), [[COPY7]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY4]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST1:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY5]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST2:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY6]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST3:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY7]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[CONCAT_VECTORS1:%[0-9]+]]:_(<vscale x 8 x p0>) = G_CONCAT_VECTORS [[BITCAST]](<vscale x 2 x p0>), [[BITCAST1]](<vscale x 2 x p0>), [[BITCAST2]](<vscale x 2 x p0>), [[BITCAST3]](<vscale x 2 x p0>)
   ; CHECK-NEXT:   RET_ReallyLR
     <vscale x 8 x i16> %0, <vscale x 8 x float> %1, <vscale x 16 x i8> %2, <vscale x 8 x ptr> %3
 ) {
@@ -190,16 +194,24 @@ define void @formal_argument_nxv4p0_4(<vscale x 4 x ptr> %0, <vscale x 4 x ptr>
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
-  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 4 x p0>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST1:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY1]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 4 x p0>) = G_CONCAT_VECTORS [[BITCAST]](<vscale x 2 x p0>), [[BITCAST1]](<vscale x 2 x p0>)
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z2
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z3
-  ; CHECK-NEXT:   [[CONCAT_VECTORS1:%[0-9]+]]:_(<vscale x 4 x p0>) = G_CONCAT_VECTORS [[COPY2]](<vscale x 2 x s64>), [[COPY3]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST2:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY2]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST3:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY3]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[CONCAT_VECTORS1:%[0-9]+]]:_(<vscale x 4 x p0>) = G_CONCAT_VECTORS [[BITCAST2]](<vscale x 2 x p0>), [[BITCAST3]](<vscale x 2 x p0>)
   ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z4
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z5
-  ; CHECK-NEXT:   [[CONCAT_VECTORS2:%[0-9]+]]:_(<vscale x 4 x p0>) = G_CONCAT_VECTORS [[COPY4]](<vscale x 2 x s64>), [[COPY5]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST4:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY4]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST5:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY5]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[CONCAT_VECTORS2:%[0-9]+]]:_(<vscale x 4 x p0>) = G_CONCAT_VECTORS [[BITCAST4]](<vscale x 2 x p0>), [[BITCAST5]](<vscale x 2 x p0>)
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z6
   ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z7
-  ; CHECK-NEXT:   [[CONCAT_VECTORS3:%[0-9]+]]:_(<vscale x 4 x p0>) = G_CONCAT_VECTORS [[COPY6]](<vscale x 2 x s64>), [[COPY7]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST6:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY6]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST7:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY7]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[CONCAT_VECTORS3:%[0-9]+]]:_(<vscale x 4 x p0>) = G_CONCAT_VECTORS [[BITCAST6]](<vscale x 2 x p0>), [[BITCAST7]](<vscale x 2 x p0>)
   ; CHECK-NEXT:   RET_ReallyLR
   ret void
 }
@@ -314,12 +326,20 @@ define void @formal_argument_nxv8p0_2(<vscale x 8 x ptr> %0, <vscale x 8 x ptr>
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z2
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z3
-  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 8 x p0>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>), [[COPY2]](<vscale x 2 x s64>), [[COPY3]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST1:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY1]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST2:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY2]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST3:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY3]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 8 x p0>) = G_CONCAT_VECTORS [[BITCAST]](<vscale x 2 x p0>), [[BITCAST1]](<vscale x 2 x p0>), [[BITCAST2]](<vscale x 2 x p0>), [[BITCAST3]](<vscale x 2 x p0>)
   ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z4
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z5
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z6
   ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z7
-  ; CHECK-NEXT:   [[CONCAT_VECTORS1:%[0-9]+]]:_(<vscale x 8 x p0>) = G_CONCAT_VECTORS [[COPY4]](<vscale x 2 x s64>), [[COPY5]](<vscale x 2 x s64>), [[COPY6]](<vscale x 2 x s64>), [[COPY7]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST4:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY4]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST5:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY5]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST6:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY6]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST7:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY7]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[CONCAT_VECTORS1:%[0-9]+]]:_(<vscale x 8 x p0>) = G_CONCAT_VECTORS [[BITCAST4]](<vscale x 2 x p0>), [[BITCAST5]](<vscale x 2 x p0>), [[BITCAST6]](<vscale x 2 x p0>), [[BITCAST7]](<vscale x 2 x p0>)
   ; CHECK-NEXT:   RET_ReallyLR
   ret void
 }
@@ -436,7 +456,15 @@ define void @formal_argument_nxv16p0_1(<vscale x 16 x ptr> %0) {
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z5
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z6
   ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z7
-  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 16 x p0>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>), [[COPY2]](<vscale x 2 x s64>), [[COPY3]](<vscale x 2 x s64>), [[COPY4]](<vscale x 2 x s64>), [[COPY5]](<vscale x 2 x s64>), [[COPY6]](<vscale x 2 x s64>), [[COPY7]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST1:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY1]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST2:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY2]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST3:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY3]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST4:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY4]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST5:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY5]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST6:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY6]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST7:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY7]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 16 x p0>) = G_CONCAT_VECTORS [[BITCAST]](<vscale x 2 x p0>), [[BITCAST1]](<vscale x 2 x p0>), [[BITCAST2]](<vscale x 2 x p0>), [[BITCAST3]](<vscale x 2 x p0>), [[BITCAST4]](<vscale x 2 x p0>), [[BITCAST5]](<vscale x 2 x p0>), [[BITCAST6]](<vscale x 2 x p0>), [[BITCAST7]](<vscale x 2 x p0>)
   ; CHECK-NEXT:   RET_ReallyLR
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/translate-sve-formal-argument.ll b/llvm/test/CodeGen/AArch64/GlobalISel/translate-sve-formal-argument.ll
index ec89da824779a..e6476ae726540 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/translate-sve-formal-argument.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/translate-sve-formal-argument.ll
@@ -155,7 +155,9 @@ define void @formal_argument_nxv4p0(<vscale x 4 x ptr> %0) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
-  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 4 x p0>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST1:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY1]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 4 x p0>) = G_CONCAT_VECTORS [[BITCAST]](<vscale x 2 x p0>), [[BITCAST1]](<vscale x 2 x p0>)
   ; CHECK-NEXT:   RET_ReallyLR
   ret void
 }
@@ -255,7 +257,11 @@ define void @formal_argument_nxv8p0(<vscale x 8 x ptr> %0) {
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z2
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z3
-  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 8 x p0>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>), [[COPY2]](<vscale x 2 x s64>), [[COPY3]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST1:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY1]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST2:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY2]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST3:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY3]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 8 x p0>) = G_CONCAT_VECTORS [[BITCAST]](<vscale x 2 x p0>), [[BITCAST1]](<vscale x 2 x p0>), [[BITCAST2]](<vscale x 2 x p0>), [[BITCAST3]](<vscale x 2 x p0>)
   ; CHECK-NEXT:   RET_ReallyLR
   ret void
 }
@@ -383,7 +389,15 @@ define void @formal_argument_nxv16p0(<vscale x 16 x ptr> %0) {
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z5
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z6
   ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z7
-  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 16 x p0>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>), [[COPY2]](<vscale x 2 x s64>), [[COPY3]](<vscale x 2 x s64>), [[COPY4]](<vscale x 2 x s64>), [[COPY5]](<vscale x 2 x s64>), [[COPY6]](<vscale x 2 x s64>), [[COPY7]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST1:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY1]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST2:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY2]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST3:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY3]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST4:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY4]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST5:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY5]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST6:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY6]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[BITCAST7:%[0-9]+]]:_(<vscale x 2 x p0>) = G_BITCAST [[COPY7]](<vscale x 2 x s64>)
+  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 16 x p0>) = G_CONCAT_VECTORS [[BITCAST]](<vscale x 2 x p0>), [[BITCAST1]](<vscale x 2 x p0>), [[BITCAST2]](<vscale x 2 x p0>), [[BITCAST3]](<vscale x 2 x p0>), [[BITCAST4]](<vscale x 2 x p0>), [[BITCAST5]](<vscale x 2 x p0>), [[BITCAST6]](<vscale x 2 x p0>), [[BITCAST7]](<vscale x 2 x p0>)
   ; CHECK-NEXT:   RET_ReallyLR
   ret void
 }

diff  --git a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid0.mir b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid0.mir
index cece3601dc1b2..95fd669a5d971 100644
--- a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid0.mir
+++ b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid0.mir
@@ -1,10 +1,10 @@
 # RUN: not llc -mtriple=aarch64-- -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
-# When a low-level type is only a single 's'/'p' character
+# When a low-level type is only a single 's'/'i'/'f'/'bf'/'p' type identifier
 ---
 name: test_low_level_type_is_single_s_p
 body: |
   bb.0:
     liveins: $x0
-    ; CHECK: [[@LINE+1]]:10: expected integers after 's'/'p' type character
+    ; CHECK: [[@LINE+1]]:10: expected integers after 's'/'i'/'f'/'bf'/'p' type identifier
     %0:_(s) = COPY $x0
 ...

diff  --git a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid1.mir b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid1.mir
index 4a7b68dab623a..dd0bb73a6cf42 100644
--- a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid1.mir
+++ b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid1.mir
@@ -5,6 +5,6 @@ name: test_low_level_type_does_not_start_with_s_p_lt
 body: |
   bb.0:
     liveins: $x0
-    ; CHECK: [[@LINE+1]]:10: expected sN, pA, <M x sN>, <M x pA>, <vscale x M x sN>, or <vscale x M x pA> for GlobalISel type
-    %0:_(i64) = COPY $x0
+    ; CHECK: [[@LINE+1]]:10: expected tN, pA, <M x tN>, <M x pA>, <vscale x M x tN>, or <vscale x M x pA> for GlobalISel type, where t = {'s', 'i', 'f', 'bf'}
+    %0:_(n64) = COPY $x0
 ...

diff  --git a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid2.mir b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid2.mir
index 1bff7a5ec9ced..6277d24aacab3 100644
--- a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid2.mir
+++ b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid2.mir
@@ -5,6 +5,6 @@ name: test_low_level_type_is_single_s_p
 body: |
   bb.0:
     liveins: $q0
-    ; CHECK: [[@LINE+1]]:15: expected integers after 's'/'p' type character
+    ; CHECK: [[@LINE+1]]:15: expected integers after 's'/'i'/'f'/'bf'/'p' type identifier
     %0:_(<2 x p>) = COPY $q0
 ...

diff  --git a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid3.mir b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid3.mir
index ebb3d37f9dfa1..ba652329c5337 100644
--- a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid3.mir
+++ b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid3.mir
@@ -1,10 +1,10 @@
 # RUN: not llc -mtriple=aarch64-- -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
-# When a low-level type is a vector which element type does not start with 's' or 'p'
+# When a low-level type is a vector whose element type does not start with a valid type identifier
 ---
 name: test_low_level_type_does_not_start_with_s_p
 body: |
   bb.0:
     liveins: $q0
-    ; CHECK: [[@LINE+1]]:10: expected <M x sN> or <M x pA> for vector type
-    %0:_(<2 x i64>) = COPY $q0
+    ; CHECK: [[@LINE+1]]:10: expected <M x tN> for vector type, where t = {'s', 'i', 'f', 'bf', 'p'}
+    %0:_(<2 x n64>) = COPY $q0
 ...
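
For reference, the new 'i'/'f'/'bf' tokens accepted by the MIR parser correspond
to the LLT constructors this patch adds. A minimal sketch of the mapping
(constructor names are taken from the TableGen emitter changes further down;
distinct integer/float scalars assume the extended mode that the new unit test
enables via LLT::setUseExtended(true)):

  #include "llvm/CodeGenTypes/LowLevelType.h"
  using namespace llvm;

  void extendedScalars() {
    LLT S32 = LLT::scalar(32);    // "s32"  - size-only scalar, as before
    LLT I32 = LLT::integer(32);   // "i32"  - explicit integer
    LLT F32 = LLT::floatIEEE(32); // "f32"  - IEEE float
    LLT BF16 = LLT::bfloat16();   // "bf16" - bfloat
    (void)S32; (void)I32; (void)F32; (void)BF16;
  }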

diff  --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err0.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err0.mir
index 5553d97acd003..6fc6640ea422a 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err0.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err0.mir
@@ -6,5 +6,5 @@ body: |
     %0:_(<vscale) = IMPLICIT_DEF
 ...
 
-# CHECK: expected <vscale x M x sN> or <vscale x M x pA>
+# CHECK: expected <vscale x M x tN>, where t = {'s', 'i', 'f', 'bf', 'p'}
 

diff  --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err1.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err1.mir
index 12bfb82ebcd12..0017e01be3b04 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err1.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err1.mir
@@ -6,4 +6,4 @@ body: |
     %0:_(<vscale notanx) = IMPLICIT_DEF
 ...
 
-# CHECK: expected <vscale x M x sN> or <vscale x M x pA>
+# CHECK: expected <vscale x M x tN>, where t = {'s', 'i', 'f', 'bf', 'p'}

diff  --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err10.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err10.mir
index 7d7d7e49f23fe..37a9a0d2c159b 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err10.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err10.mir
@@ -7,4 +7,4 @@ body: |
     %0:_(<vscale x 4 x p) = IMPLICIT_DEF
 ...
 
-# CHECK: expected integers after 's'/'p' type character
+# CHECK: expected integers after 's'/'i'/'f'/'bf'/'p' type identifier

diff  --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err11.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err11.mir
index f8927c22ab45f..64296f624826e 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err11.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err11.mir
@@ -7,4 +7,4 @@ body: |
     %0:_(<vscale x 4 x s32) = IMPLICIT_DEF
 ...
 
-# CHECK: expected <vscale x M x sN> or <vscale M x pA> for vector type
+# CHECK: expected <vscale x M x tN> for vector type, where t = {'s', 'i', 'f', 'bf', 'p'}

diff  --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err12.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err12.mir
index 5ced1aea30c08..9c840d92ac967 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err12.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err12.mir
@@ -7,4 +7,4 @@ body: |
     %0:_(<vscale x 4 x p0) = IMPLICIT_DEF
 ...
 
-# CHECK: expected <vscale x M x sN> or <vscale M x pA> for vector type
+# CHECK: expected <vscale x M x tN> for vector type, where t = {'s', 'i', 'f', 'bf', 'p'}

diff  --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err13.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err13.mir
index 94b8230233fa6..982adfdf792c2 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err13.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err13.mir
@@ -7,4 +7,4 @@ body: |
     %0:_(<vscale x 4 x s32 X) = IMPLICIT_DEF
 ...
 
-# CHECK: expected <vscale x M x sN> or <vscale M x pA> for vector type
+# CHECK: expected <vscale x M x tN> for vector type, where t = {'s', 'i', 'f', 'bf', 'p'}

diff  --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err14.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err14.mir
index 323e2d975692f..8a3e86ecaaab9 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err14.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err14.mir
@@ -7,4 +7,4 @@ body: |
     %0:_(<vscale x 4 x p0 X) = IMPLICIT_DEF
 ...
 
-# CHECK: expected <vscale x M x sN> or <vscale M x pA> for vector type
+# CHECK: expected <vscale x M x tN> for vector type, where t = {'s', 'i', 'f', 'bf', 'p'}

diff  --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err15.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err15.mir
index d1613869bf671..228b367b06d40 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err15.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err15.mir
@@ -7,4 +7,4 @@ body: |
     %0:_(notatype) = IMPLICIT_DEF
 ...
 
-# CHECK: expected sN, pA, <M x sN>, <M x pA>, <vscale x M x sN>, or <vscale x M x pA> for GlobalISel type
+# CHECK: expected tN, pA, <M x tN>, <M x pA>, <vscale x M x tN>, or <vscale x M x pA> for GlobalISel type, where t = {'s', 'i', 'f', 'bf'}

diff  --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err2.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err2.mir
index c504a7d6be249..b187bff218a75 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err2.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err2.mir
@@ -6,4 +6,4 @@ body: |
     %0:_(<vscale x) = IMPLICIT_DEF
 ...
 
-# CHECK: expected <vscale x M x sN> or <vscale M x pA> for vector type
+# CHECK: expected <vscale x M x tN> for vector type, where t = {'s', 'i', 'f', 'bf', 'p'}

diff  --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err3.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err3.mir
index c504a7d6be249..b187bff218a75 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err3.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err3.mir
@@ -6,4 +6,4 @@ body: |
     %0:_(<vscale x) = IMPLICIT_DEF
 ...
 
-# CHECK: expected <vscale x M x sN> or <vscale M x pA> for vector type
+# CHECK: expected <vscale x M x tN> for vector type, where t = {'s', 'i', 'f', 'bf', 'p'}

diff  --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err4.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err4.mir
index 654f534f4d301..1969f28b18d28 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err4.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err4.mir
@@ -6,5 +6,5 @@ body: |
     %0:_(<vscale x notanint) = IMPLICIT_DEF
 ...
 
-# CHECK: expected <vscale x M x sN> or <vscale M x pA> for vector type
+# CHECK: expected <vscale x M x tN> for vector type, where t = {'s', 'i', 'f', 'bf', 'p'}
 

diff  --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err5.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err5.mir
index 26be2868c522e..49823f7432ba9 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err5.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err5.mir
@@ -6,4 +6,4 @@ body: |
     %0:_(<vscale x 4) = IMPLICIT_DEF
 ...
 
-# CHECK: expected <vscale x M x sN> or <vscale M x pA> for vector type
+# CHECK: expected <vscale x M x tN> for vector type, where t = {'s', 'i', 'f', 'bf', 'p'}

diff  --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err6.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err6.mir
index 07a30f57139dc..7f8e421e76ae3 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err6.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err6.mir
@@ -7,5 +7,5 @@ body: |
     %0:_(<vscale x 4 x) = IMPLICIT_DEF
 ...
 
-# CHECK: expected <vscale x M x sN> or <vscale M x pA> for vector type
+# CHECK: expected <vscale x M x tN> for vector type, where t = {'s', 'i', 'f', 'bf', 'p'}
 

diff  --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err7.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err7.mir
index dba902efe6331..ae3c472e2557c 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err7.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err7.mir
@@ -6,4 +6,4 @@ body: |
     %0:_(<vscale x 4 x notansorp) = IMPLICIT_DEF
 ...
 
-# CHECK: expected <vscale x M x sN> or <vscale M x pA> for vector type
+# CHECK: expected <vscale x M x tN> for vector type, where t = {'s', 'i', 'f', 'bf', 'p'}

diff  --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err8.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err8.mir
index 8bedeabaa7906..9044f77f84dc5 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err8.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err8.mir
@@ -7,4 +7,4 @@ body: |
     %0:_(<vscale x 4 x s) = IMPLICIT_DEF
 ...
 
-# CHECK: expected integers after 's'/'p' type character
+# CHECK: expected integers after 's'/'i'/'f'/'bf'/'p' type identifier

diff  --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err9.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err9.mir
index fd0b9a4a054ab..9220715320d99 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err9.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err9.mir
@@ -6,6 +6,6 @@ body: |
     %0:_(<vscale x 4 x pX) = IMPLICIT_DEF
 ...
 
-# CHECK: expected integers after 's'/'p' type character
+# CHECK: expected integers after 's'/'i'/'f'/'bf'/'p' type identifier
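
On the C++ side, the same identifier set is distinguished by the scalar
predicates this patch introduces; a hedged sketch mirroring the check order
used in the emitter changes below (more specific predicates first):

  #include "llvm/CodeGenTypes/LowLevelType.h"

  // Classify a scalar LLT the way the patched emitCxxEnumValue does.
  const char *scalarKind(const llvm::LLT &Ty) {
    if (Ty.isBFloat16()) return "bf16";
    if (Ty.isPPCF128())  return "ppcf128";
    if (Ty.isX86FP80())  return "x86fp80";
    if (Ty.isFloat())    return "ieee float";
    if (Ty.isInteger())  return "integer";
    return "size-only scalar"; // plain sN
  }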
 
 

diff  --git a/llvm/test/CodeGen/MIR/WebAssembly/typed-immediate-operand-invalid0.mir b/llvm/test/CodeGen/MIR/WebAssembly/typed-immediate-operand-invalid0.mir
index 72908711e9ee5..7a2e381372549 100644
--- a/llvm/test/CodeGen/MIR/WebAssembly/typed-immediate-operand-invalid0.mir
+++ b/llvm/test/CodeGen/MIR/WebAssembly/typed-immediate-operand-invalid0.mir
@@ -1,5 +1,5 @@
 # RUN: not llc -mtriple=wasm32-unknown-unknown -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
-# When a typed immediate operand is only a single 'i'/'s'/'p' character
+# When a typed immediate operand is only a single type character
 ---
 name: test_typed_immediate_operand_invalid0
 liveins:
@@ -7,7 +7,7 @@ liveins:
 body: |
   bb.0:
     liveins: $arguments
-    ; CHECK: [[@LINE+1]]:24: expected integers after 'i'/'s'/'p' type character
+    ; CHECK: [[@LINE+1]]:24: expected integers after 'i'/'s'/'f'/'bf'/'p' type identifier
     %0:i32 = CONST_I32 i 0, implicit-def dead $arguments
     RETURN implicit-def dead $arguments
 ...

diff  --git a/llvm/test/CodeGen/MIR/WebAssembly/typed-immediate-operand-invalid1.mir b/llvm/test/CodeGen/MIR/WebAssembly/typed-immediate-operand-invalid1.mir
index f5c16b52553bf..7f59a9e5eca0f 100644
--- a/llvm/test/CodeGen/MIR/WebAssembly/typed-immediate-operand-invalid1.mir
+++ b/llvm/test/CodeGen/MIR/WebAssembly/typed-immediate-operand-invalid1.mir
@@ -1,5 +1,5 @@
 # RUN: not llc -mtriple=wasm32-unknown-unknown -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
-# When a typed immediate operand does not start with 'i', 's', or 'p'
+# When a typed immediate operand does not start with a valid type identifier
 ---
 name: test_typed_immediate_operand_invalid1
 liveins:
@@ -7,7 +7,7 @@ liveins:
 body: |
   bb.0:
     liveins: $arguments
-    ; CHECK: [[@LINE+1]]:24: a typed immediate operand should start with one of 'i', 's', or 'p'
+    ; CHECK: [[@LINE+1]]:24: a typed immediate operand should start with one of 'i', 's', 'f', 'bf', or 'p'
     %0:i32 = CONST_I32 abc 0, implicit-def dead $arguments
     RETURN implicit-def dead $arguments
 ...

diff  --git a/llvm/test/TableGen/GlobalISelEmitter/GlobalISelEmitter.td b/llvm/test/TableGen/GlobalISelEmitter/GlobalISelEmitter.td
index 3ee4a2b6dce45..b4cdd53d78124 100644
--- a/llvm/test/TableGen/GlobalISelEmitter/GlobalISelEmitter.td
+++ b/llvm/test/TableGen/GlobalISelEmitter/GlobalISelEmitter.td
@@ -1,6 +1,7 @@
 // RUN: llvm-tblgen -gen-global-isel -I %p/../../../include -I %p/../Common -optimize-match-table=false %s -o %t.non-optimized.cpp
 // RUN: llvm-tblgen -gen-global-isel -I %p/../../../include -I %p/../Common -optimize-match-table=true  %s -o %t.optimized.cpp
 // RUN: llvm-tblgen -gen-global-isel -I %p/../../../include -I %p/../Common %s -o %t.default.cpp
+// RUN: llvm-tblgen -gen-global-isel -I %p/../../../include -I %p/../Common -gisel-extended-llt  %s -o %t.extended-llt.cpp
 
 // RUN: FileCheck %s --check-prefixes=CHECK,R19C,R19N -input-file=%t.non-optimized.cpp
 // RUN: FileCheck %s --check-prefixes=CHECK,R19C,R19O -input-file=%t.optimized.cpp
@@ -19,6 +20,7 @@
 
 // RUN: FileCheck %s --check-prefixes=CHECK,R02C,R02N,NOOPT -input-file=%t.non-optimized.cpp
 // RUN: FileCheck %s --check-prefixes=CHECK,R02C,R02O       -input-file=%t.optimized.cpp
+// RUN: FileCheck %s --check-prefixes=EXTENDED -input-file=%t.extended-llt.cpp
 
 // RUN: diff %t.default.cpp %t.optimized.cpp
 
@@ -98,15 +100,28 @@ def HasC : Predicate<"Subtarget->hasC()"> { let RecomputePerFunction = 1; }
 
 // CHECK-LABEL: // LLT Objects.
 // CHECK-NEXT:  enum {
-// CHECK-NEXT:    GILLT_p0s32
 // CHECK-NEXT:    GILLT_s32,
-// CHECK-NEXT:  }
+// CHECK-NEXT:    GILLT_p0s32,
+// CHECK-NEXT:  };
 // CHECK-NEXT:  const static size_t NumTypeObjects = 2;
 // CHECK-NEXT:  const static LLT TypeObjects[] = {
-// CHECK-NEXT:    LLT::pointer(0, 32),
 // CHECK-NEXT:    LLT::scalar(32),
+// CHECK-NEXT:    LLT::pointer(0, 32),
 // CHECK-NEXT:  };
 
+// EXTENDED-LABEL: // LLT Objects.
+// EXTENDED-NEXT:  enum {
+// EXTENDED-NEXT:    GILLT_i32,
+// EXTENDED-NEXT:    GILLT_f32,
+// EXTENDED-NEXT:    GILLT_p0s32,
+// EXTENDED-NEXT:  };
+// EXTENDED-NEXT:  const static size_t NumTypeObjects = 3;
+// EXTENDED-NEXT:  const static LLT TypeObjects[] = {
+// EXTENDED-NEXT:    LLT::integer(32),
+// EXTENDED-NEXT:    LLT::floatIEEE(32),
+// EXTENDED-NEXT:    LLT::pointer(0, 32),
+// EXTENDED-NEXT:  };
+
 // CHECK-LABEL: enum SubtargetFeatureBits : uint8_t {
 // CHECK-NEXT:    Feature_HasABit = 0,
 // CHECK-NEXT:    Feature_HasBBit = 1,
@@ -256,6 +271,10 @@ def HasC : Predicate<"Subtarget->hasC()"> { let RecomputePerFunction = 1; }
 // CHECK-LABEL: MyTargetInstructionSelector::getMatchTable() const {
 // CHECK-NEXT: MatchTable0[] = {
 
+// EXTENDED: const uint8_t *
+// EXTENDED-LABEL: MyTargetInstructionSelector::getMatchTable() const {
+// EXTENDED-NEXT: MatchTable0[] = {
+
 //===- Test a pattern with multiple ComplexPatterns in multiple instrs ----===//
 //
 // R19O-NEXT:  GIM_SwitchOpcode, /*MI*/0, /*[*/GIMT_Encode2({{[0-9]+}}), GIMT_Encode2({{[0-9]+}}), /*)*//*default:*//*Label [[DEFAULT_NUM:[0-9]+]]*/ GIMT_Encode4([[DEFAULT:[0-9]+]]),
@@ -333,6 +352,17 @@ def HasC : Predicate<"Subtarget->hasC()"> { let RecomputePerFunction = 1; }
 // R19O:       // Label [[DEFAULT_NUM]]: @[[DEFAULT]]
 // R19O-NEXT:  GIM_Reject,
 // R19O-NEXT:  };
+//
+// EXTENDED-NEXT:  GIM_SwitchOpcode, /*MI*/0, /*[*/GIMT_Encode2({{[0-9]+}}), GIMT_Encode2({{[0-9]+}}), /*)*//*default:*//*Label [[DEFAULT_NUM:[0-9]+]]*/ GIMT_Encode4([[DEFAULT:[0-9]+]]),
+// EXTENDED-NEXT:  /*TargetOpcode::G_ADD*//*Label [[CASE_ADD_NUM:[0-9]+]]*/ GIMT_Encode4([[CASE_ADD:[0-9]+]]),
+// EXTENDED:       /*TargetOpcode::G_SELECT*//*Label [[CASE_SELECT_NUM:[0-9]+]]*/ GIMT_Encode4([[CASE_SELECT:[0-9]+]]),
+// EXTENDED:       // Label [[CASE_ADD_NUM]]: @[[CASE_ADD]]
+// EXTENDED:       // Label [[CASE_SELECT_NUM]]: @[[CASE_SELECT]]
+// EXTENDED-NEXT:  GIM_Try, /*On fail goto*//*Label [[GROUP_NUM:[0-9]+]]*/ GIMT_Encode4([[GROUP:[0-9]+]]),
+// EXTENDED-NEXT:    GIM_RootCheckType, /*Op*/0, /*Type*/GILLT_i32,
+// EXTENDED-NEXT:    GIM_RootCheckType, /*Op*/1, /*Type*/GILLT_i32,
+// EXTENDED-NEXT:    GIM_RootCheckType, /*Op*/2, /*Type*/GILLT_i32,
+// EXTENDED-NEXT:    GIM_RootCheckType, /*Op*/3, /*Type*/GILLT_i32,
 
 def INSN3 : I<(outs GPR32:$dst),
               (ins GPR32Op:$src1, GPR32:$src2a, GPR32:$src2b, GPR32:$scr), []>;

diff  --git a/llvm/test/TableGen/GlobalISelEmitter/HwModes.td b/llvm/test/TableGen/GlobalISelEmitter/HwModes.td
index 466f637e86ce1..2e668fe3c11f8 100644
--- a/llvm/test/TableGen/GlobalISelEmitter/HwModes.td
+++ b/llvm/test/TableGen/GlobalISelEmitter/HwModes.td
@@ -70,17 +70,17 @@ class I<dag OOps, dag IOps, list<dag> Pat>
 
 // CHECK-LABEL: // LLT Objects.
 // CHECK-NEXT:  enum {
-// CHECK-NEXT:    GILLT_p0s32,
-// CHECK-NEXT:    GILLT_p0s64,
 // CHECK-NEXT:    GILLT_s32,
 // CHECK-NEXT:    GILLT_s64,
-// CHECK-NEXT:  }
+// CHECK-NEXT:    GILLT_p0s32,
+// CHECK-NEXT:    GILLT_p0s64,
+// CHECK-NEXT:  };
 // CHECK-NEXT:  const static size_t NumTypeObjects = 4;
 // CHECK-NEXT:  const static LLT TypeObjects[] = {
-// CHECK-NEXT:    LLT::pointer(0, 32),
-// CHECK-NEXT:    LLT::pointer(0, 64),
 // CHECK-NEXT:    LLT::scalar(32),
 // CHECK-NEXT:    LLT::scalar(64),
+// CHECK-NEXT:    LLT::pointer(0, 32),
+// CHECK-NEXT:    LLT::pointer(0, 64),
 // CHECK-NEXT:  };
 
 // CHECK-LABEL: enum SubtargetFeatureBits : uint8_t {

diff  --git a/llvm/unittests/CodeGen/GlobalISel/CMakeLists.txt b/llvm/unittests/CodeGen/GlobalISel/CMakeLists.txt
index 4ef6aff943f73..e85a3aa72c961 100644
--- a/llvm/unittests/CodeGen/GlobalISel/CMakeLists.txt
+++ b/llvm/unittests/CodeGen/GlobalISel/CMakeLists.txt
@@ -14,6 +14,7 @@ set(LLVM_LINK_COMPONENTS
   )
 
 add_llvm_unittest(GlobalISelTests
+  IRTranslatorBF16Test.cpp
   ConstantFoldingTest.cpp
   CSETest.cpp
   GIMatchTableExecutorTest.cpp

diff  --git a/llvm/unittests/CodeGen/GlobalISel/IRTranslatorBF16Test.cpp b/llvm/unittests/CodeGen/GlobalISel/IRTranslatorBF16Test.cpp
new file mode 100644
index 0000000000000..377602fff1579
--- /dev/null
+++ b/llvm/unittests/CodeGen/GlobalISel/IRTranslatorBF16Test.cpp
@@ -0,0 +1,135 @@
+//===- IRTranslatorBF16Test.cpp - IRTranslator bfloat16 unit tests --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetFrameLowering.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/Module.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/TargetSelect.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "gtest/gtest.h"
+#include <memory>
+
+using namespace llvm;
+
+namespace {
+struct AArch64IRTranslatorTest : public ::testing::Test {
+  LLVMContext C;
+
+public:
+  AArch64IRTranslatorTest() {}
+  std::unique_ptr<TargetMachine> createTargetMachine() const {
+    Triple TargetTriple("aarch64--");
+    std::string Error;
+    const Target *T = TargetRegistry::lookupTarget("", TargetTriple, Error);
+    if (!T)
+      return nullptr;
+
+    TargetOptions Options;
+    return std::unique_ptr<TargetMachine>(
+        T->createTargetMachine(TargetTriple, "", "", Options, std::nullopt,
+                               std::nullopt, CodeGenOptLevel::None));
+  }
+
+  std::unique_ptr<Module> parseIR(const char *IR) {
+    SMDiagnostic Err;
+    std::unique_ptr<Module> Mod = parseAssemblyString(IR, Err, C);
+    if (!Mod)
+      Err.print("Test TargetIRTranslator", errs());
+    return Mod;
+  }
+};
+} // namespace
+
+TEST_F(AArch64IRTranslatorTest, IRTranslateBfloat16) {
+  InitializeAllTargets();
+  InitializeAllTargetMCs();
+  InitializeAllAsmPrinters();
+  InitializeAllAsmParsers();
+
+  PassRegistry *Registry = PassRegistry::getPassRegistry();
+  initializeCore(*Registry);
+  initializeCodeGen(*Registry);
+  initializeGlobalISel(*Registry);
+
+  std::unique_ptr<Module> M = parseIR(R"(
+  define void @foo(ptr %p0) {
+    %ptr1 = getelementptr bfloat, ptr %p0, i64 0
+    %ptr2 = getelementptr bfloat, ptr %p0, i64 1
+    %ptr3 = getelementptr bfloat, ptr %p0, i64 2
+    %a = load bfloat, ptr %ptr1, align 2
+    %b = load bfloat, ptr %ptr2, align 2
+    %c = load bfloat, ptr %ptr3, align 2
+    %mul = fmul bfloat %a, %b
+    %res = fadd bfloat %mul, %c
+    %ptr4 = getelementptr bfloat, ptr %p0, i64 3
+    store bfloat %res, ptr %ptr4, align 2
+    ret void
+  }
+  )");
+
+  auto TM = createTargetMachine();
+  if (!TM)
+    GTEST_SKIP();
+  M->setDataLayout(TM->createDataLayout());
+
+  TM->setGlobalISel(true);
+  TM->setGlobalISelAbort(GlobalISelAbortMode::DisableWithDiag);
+  LLT::setUseExtended(true);
+
+  legacy::PassManager PM;
+  TargetPassConfig *TPC(TM->createPassConfig(PM));
+
+  MachineModuleInfoWrapperPass *MMIWP =
+      new MachineModuleInfoWrapperPass(TM.get());
+  PM.add(TPC);
+  PM.add(MMIWP);
+  PM.add(new IRTranslator());
+  PM.run(*M);
+
+  auto *MMI = &MMIWP->getMMI();
+  Function *F = M->getFunction("foo");
+  auto *MF = MMI->getMachineFunction(*F);
+  MachineRegisterInfo &MRI = MF->getRegInfo();
+  EXPECT_FALSE(MF->getProperties().hasProperty(
+      llvm::MachineFunctionProperties::Property::FailedISel));
+  for (auto &MI : MF->front()) {
+    if (MI.getOpcode() == TargetOpcode::G_LOAD) {
+      EXPECT_TRUE(MRI.getType(MI.getOperand(0).getReg()).isBFloat16());
+    }
+
+    if (MI.getOpcode() == TargetOpcode::G_FADD ||
+        MI.getOpcode() == TargetOpcode::G_FMUL) {
+      for (auto &Op : MI.operands()) {
+        EXPECT_TRUE(MRI.getType(Op.getReg()).isBFloat16());
+      }
+    }
+  }
+  MMI->deleteMachineFunctionFor(*F);
+
+  // Run again without extended LLT
+  LLT::setUseExtended(false);
+
+  PM.run(*M);
+  MF = MMI->getMachineFunction(*F);
+  EXPECT_TRUE(MF->getProperties().hasProperty(
+      llvm::MachineFunctionProperties::Property::FailedISel));
+}

diff  --git a/llvm/unittests/CodeGen/LowLevelTypeTest.cpp b/llvm/unittests/CodeGen/LowLevelTypeTest.cpp
index edf6280b50a70..411d834647d70 100644
--- a/llvm/unittests/CodeGen/LowLevelTypeTest.cpp
+++ b/llvm/unittests/CodeGen/LowLevelTypeTest.cpp
@@ -407,26 +407,11 @@ static_assert(CEV2P1.getElementCount() != ElementCount::getFixed(1));
 static_assert(CEV2S32.getElementCount() == ElementCount::getFixed(2));
 static_assert(CEV2S32.getSizeInBits() == TypeSize::getFixed(64));
 static_assert(CEV2P1.getSizeInBits() == TypeSize::getFixed(128));
-static_assert(CEV2P1.getScalarType() == LLT::pointer(1, 64));
-static_assert(CES32.getScalarType() == CES32);
-static_assert(CEV2S32.getScalarType() == CES32);
-static_assert(CEV2S32.changeElementType(CEP0) == LLT::fixed_vector(2, CEP0));
-static_assert(CEV2S32.changeElementSize(16) == LLT::fixed_vector(2, 16));
-static_assert(CEV2S32.changeElementCount(ElementCount::getFixed(4)) ==
-              LLT::fixed_vector(4, 32));
 static_assert(CES32.isByteSized());
 static_assert(!LLT::scalar(7).isByteSized());
 static_assert(CES32.getScalarSizeInBits() == 32);
 static_assert(CEP0.getAddressSpace() == 0);
 static_assert(LLT::pointer(1, 64).getAddressSpace() == 1);
-static_assert(CEV2S32.multiplyElements(2) == LLT::fixed_vector(4, 32));
-static_assert(CEV2S32.divide(2) == LLT::scalar(32));
-static_assert(LLT::scalarOrVector(ElementCount::getFixed(1), LLT::scalar(32)) ==
-              LLT::scalar(32));
-static_assert(LLT::scalarOrVector(ElementCount::getFixed(2), LLT::scalar(32)) ==
-              LLT::fixed_vector(2, 32));
-static_assert(LLT::scalarOrVector(ElementCount::getFixed(2), CEP0) ==
-              LLT::fixed_vector(2, CEP0));
 
 TEST(LowLevelTypeTest, ConstExpr) {
   EXPECT_EQ(LLT(), CELLT);
@@ -434,6 +419,21 @@ TEST(LowLevelTypeTest, ConstExpr) {
   EXPECT_EQ(LLT::fixed_vector(2, 32), CEV2S32);
   EXPECT_EQ(LLT::pointer(0, 32), CEP0);
   EXPECT_EQ(LLT::scalable_vector(2, 32), CESV2S32);
+  EXPECT_EQ(CEV2P1.getScalarType(), LLT::pointer(1, 64));
+  EXPECT_EQ(CES32.getScalarType(), CES32);
+  EXPECT_EQ(CEV2S32.getScalarType(), CES32);
+  EXPECT_EQ(CEV2S32.changeElementType(CEP0), LLT::fixed_vector(2, CEP0));
+  EXPECT_EQ(CEV2S32.changeElementSize(16), LLT::fixed_vector(2, 16));
+  EXPECT_EQ(CEV2S32.changeElementCount(ElementCount::getFixed(4)),
+            LLT::fixed_vector(4, 32));
+  EXPECT_EQ(CEV2S32.multiplyElements(2), LLT::fixed_vector(4, 32));
+  EXPECT_EQ(CEV2S32.divide(2), LLT::scalar(32));
+  EXPECT_EQ(LLT::scalarOrVector(ElementCount::getFixed(1), LLT::scalar(32)),
+            LLT::scalar(32));
+  EXPECT_EQ(LLT::scalarOrVector(ElementCount::getFixed(2), LLT::scalar(32)),
+            LLT::fixed_vector(2, 32));
+  EXPECT_EQ(LLT::scalarOrVector(ElementCount::getFixed(2), CEP0),
+            LLT::fixed_vector(2, CEP0));
 }
 
 TEST(LowLevelTypeTest, IsFixedVector) {

diff  --git a/llvm/utils/TableGen/Common/CMakeLists.txt b/llvm/utils/TableGen/Common/CMakeLists.txt
index 4e129b465ebd7..7b33c2a6e02a0 100644
--- a/llvm/utils/TableGen/Common/CMakeLists.txt
+++ b/llvm/utils/TableGen/Common/CMakeLists.txt
@@ -42,6 +42,9 @@ add_llvm_library(LLVMTableGenCommon STATIC OBJECT EXCLUDE_FROM_ALL DISABLE_LLVM_
   DEPENDS
   vt_gen
   intrinsics_gen
+
+  LINK_COMPONENTS
+  CodeGenTypes
   )
 
 # Users may include its headers as "Common/*.h"

diff  --git a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp
index bd1c3a4b9e828..8232ccacf3b45 100644
--- a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp
+++ b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp
@@ -10,6 +10,7 @@
 #include "Common/CodeGenInstruction.h"
 #include "Common/CodeGenRegisters.h"
 #include "llvm/ADT/Statistic.h"
+#include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/LEB128.h"
 #include "llvm/Support/ScopedPrinter.h"
@@ -363,42 +364,96 @@ std::string LLTCodeGen::getCxxEnumValue() const {
 
 void LLTCodeGen::emitCxxEnumValue(raw_ostream &OS) const {
   if (Ty.isScalar()) {
-    OS << "GILLT_s" << Ty.getSizeInBits();
+    if (Ty.isBFloat16())
+      OS << "GILLT_bf16";
+    else if (Ty.isPPCF128())
+      OS << "GILLT_ppcf128";
+    else if (Ty.isX86FP80())
+      OS << "GILLT_x86fp80";
+    else if (Ty.isFloat())
+      OS << "GILLT_f" << Ty.getSizeInBits();
+    else if (Ty.isInteger())
+      OS << "GILLT_i" << Ty.getSizeInBits();
+    else
+      OS << "GILLT_s" << Ty.getSizeInBits();
     return;
   }
   if (Ty.isVector()) {
     OS << (Ty.isScalable() ? "GILLT_nxv" : "GILLT_v")
-       << Ty.getElementCount().getKnownMinValue() << "s"
-       << Ty.getScalarSizeInBits();
+       << Ty.getElementCount().getKnownMinValue();
+
+    LLT ElemTy = Ty.getElementType();
+    if (ElemTy.isBFloat16())
+      OS << "bf16";
+    else if (ElemTy.isPPCF128())
+      OS << "ppcf128";
+    else if (ElemTy.isX86FP80())
+      OS << "x86fp80";
+    else if (ElemTy.isFloat())
+      OS << "f" << ElemTy.getSizeInBits();
+    else if (ElemTy.isInteger())
+      OS << "i" << ElemTy.getSizeInBits();
+    else
+      OS << "s" << ElemTy.getSizeInBits();
     return;
   }
+
   if (Ty.isPointer()) {
     OS << "GILLT_p" << Ty.getAddressSpace();
     if (Ty.getSizeInBits() > 0)
       OS << "s" << Ty.getSizeInBits();
     return;
   }
+
   llvm_unreachable("Unhandled LLT");
 }
 
 void LLTCodeGen::emitCxxConstructorCall(raw_ostream &OS) const {
   if (Ty.isScalar()) {
-    OS << "LLT::scalar(" << Ty.getSizeInBits() << ")";
+    if (Ty.isInteger())
+      OS << "LLT::integer(" << Ty.getScalarSizeInBits() << ")";
+    else if (Ty.isBFloat16())
+      OS << "LLT::bfloat16()";
+    else if (Ty.isPPCF128())
+      OS << "LLT::ppcf128()";
+    else if (Ty.isX86FP80())
+      OS << "LLT::x86fp80()";
+    else if (Ty.isFloat())
+      OS << "LLT::floatIEEE(" << Ty.getScalarSizeInBits() << ")";
+    else
+      OS << "LLT::scalar(" << Ty.getScalarSizeInBits() << ")";
     return;
   }
+
   if (Ty.isVector()) {
     OS << "LLT::vector("
        << (Ty.isScalable() ? "ElementCount::getScalable("
                            : "ElementCount::getFixed(")
-       << Ty.getElementCount().getKnownMinValue() << "), "
-       << Ty.getScalarSizeInBits() << ")";
+       << Ty.getElementCount().getKnownMinValue() << "), ";
+
+    LLT ElemTy = Ty.getElementType();
+    if (ElemTy.isInteger())
+      OS << "LLT::integer(" << ElemTy.getScalarSizeInBits() << ")";
+    else if (ElemTy.isBFloat16())
+      OS << "LLT::bfloat16()";
+    else if (ElemTy.isPPCF128())
+      OS << "LLT::ppcf128()";
+    else if (ElemTy.isX86FP80())
+      OS << "LLT::x86fp80()";
+    else if (ElemTy.isFloat())
+      OS << "LLT::floatIEEE(" << ElemTy.getScalarSizeInBits() << ")";
+    else
+      OS << "LLT::scalar(" << Ty.getScalarSizeInBits() << ")";
+    OS << ")";
     return;
   }
+
   if (Ty.isPointer() && Ty.getSizeInBits() > 0) {
     OS << "LLT::pointer(" << Ty.getAddressSpace() << ", " << Ty.getSizeInBits()
        << ")";
     return;
   }
+
   llvm_unreachable("Unhandled LLT");
 }
 
@@ -406,47 +461,17 @@ void LLTCodeGen::emitCxxConstructorCall(raw_ostream &OS) const {
 /// particular logic behind the order but either A < B or B < A must be
 /// true if A != B.
 bool LLTCodeGen::operator<(const LLTCodeGen &Other) const {
-  if (Ty.isValid() != Other.Ty.isValid())
-    return Ty.isValid() < Other.Ty.isValid();
-  if (!Ty.isValid())
-    return false;
-
-  if (Ty.isVector() != Other.Ty.isVector())
-    return Ty.isVector() < Other.Ty.isVector();
-  if (Ty.isScalar() != Other.Ty.isScalar())
-    return Ty.isScalar() < Other.Ty.isScalar();
-  if (Ty.isPointer() != Other.Ty.isPointer())
-    return Ty.isPointer() < Other.Ty.isPointer();
-
-  if (Ty.isPointer() && Ty.getAddressSpace() != Other.Ty.getAddressSpace())
-    return Ty.getAddressSpace() < Other.Ty.getAddressSpace();
-
-  if (Ty.isVector() && Ty.getElementCount() != Other.Ty.getElementCount())
-    return std::tuple(Ty.isScalable(),
-                      Ty.getElementCount().getKnownMinValue()) <
-           std::tuple(Other.Ty.isScalable(),
-                      Other.Ty.getElementCount().getKnownMinValue());
-
-  assert((!Ty.isVector() || Ty.isScalable() == Other.Ty.isScalable()) &&
-         "Unexpected mismatch of scalable property");
-  return Ty.isVector()
-             ? std::tuple(Ty.isScalable(),
-                          Ty.getSizeInBits().getKnownMinValue()) <
-                   std::tuple(Other.Ty.isScalable(),
-                              Other.Ty.getSizeInBits().getKnownMinValue())
-             : Ty.getSizeInBits().getFixedValue() <
-                   Other.Ty.getSizeInBits().getFixedValue();
+  return Ty.getUniqueRAWLLTData() < Other.Ty.getUniqueRAWLLTData();
 }
 
 //===- LLTCodeGen Helpers -------------------------------------------------===//
 
 std::optional<LLTCodeGen> llvm::gi::MVTToLLT(MVT VT) {
   if (VT.isVector() && !VT.getVectorElementCount().isScalar())
-    return LLTCodeGen(
-        LLT::vector(VT.getVectorElementCount(), VT.getScalarSizeInBits()));
+    return LLTCodeGen(LLT(VT));
 
   if (VT.isInteger() || VT.isFloatingPoint())
-    return LLTCodeGen(LLT::scalar(VT.getSizeInBits()));
+    return LLTCodeGen(LLT(VT));
 
   return std::nullopt;
 }
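
The rewritten comparator works because the raw LLT encoding now carries the
FPInfo as well; a minimal illustration (getUniqueRAWLLTData() is used exactly
this way in the hunk above; that it returns an integral, totally ordered key
is an assumption here):

  #include "llvm/CodeGenTypes/LowLevelType.h"

  // Total order over LLTs via the unique raw encoding, as in the
  // simplified LLTCodeGen::operator< above.
  bool lltLess(const llvm::LLT &A, const llvm::LLT &B) {
    return A.getUniqueRAWLLTData() < B.getUniqueRAWLLTData();
  }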

diff  --git a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTableExecutorEmitter.cpp b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTableExecutorEmitter.cpp
index a51cbd26fcaf5..e9608bb7a9a7a 100644
--- a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTableExecutorEmitter.cpp
+++ b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTableExecutorEmitter.cpp
@@ -8,11 +8,21 @@
 
 #include "GlobalISelMatchTableExecutorEmitter.h"
 #include "GlobalISelMatchTable.h"
+#include "llvm/Support/CommandLine.h"
 #include "llvm/TableGen/CodeGenHelpers.h"
 
 using namespace llvm;
 using namespace llvm::gi;
 
+static cl::opt<bool>
+    AllowExtendedLLT("gisel-extended-llt",
+                     cl::desc("Generate extended llt names in match tables"),
+                     cl::init(false));
+
+GlobalISelMatchTableExecutorEmitter::GlobalISelMatchTableExecutorEmitter() {
+  LLT::setUseExtended(AllowExtendedLLT);
+}
+
 void GlobalISelMatchTableExecutorEmitter::emitSubtargetFeatureBitsetImpl(
     raw_ostream &OS, ArrayRef<RuleMatcher> Rules) {
   SubtargetFeatureInfo::emitSubtargetFeatureBitEnumeration(SubtargetFeatures,

diff  --git a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTableExecutorEmitter.h b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTableExecutorEmitter.h
index 1f66d73f77576..c56e0549f1ec6 100644
--- a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTableExecutorEmitter.h
+++ b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTableExecutorEmitter.h
@@ -188,7 +188,7 @@ class GlobalISelMatchTableExecutorEmitter {
                                Comment);
   }
 
-  GlobalISelMatchTableExecutorEmitter() = default;
+  GlobalISelMatchTableExecutorEmitter();
 
 public:
   virtual ~GlobalISelMatchTableExecutorEmitter() = default;


        

