[llvm] [GlobalISel][LLT] Introduce FPInfo for LLT (PR #155107)

via llvm-commits llvm-commits at lists.llvm.org
Sun Aug 24 07:20:05 PDT 2025


https://github.com/DenisGZM updated https://github.com/llvm/llvm-project/pull/155107

>From 6baa709de27e3bcdf36e044b3f87b064857a83de Mon Sep 17 00:00:00 2001
From: Tim Gymnich <tim at gymni.ch>
Date: Fri, 4 Oct 2024 09:20:06 +0000
Subject: [PATCH 1/9] add isScalar and isFixedVector

---
 llvm/include/llvm/CodeGenTypes/LowLevelType.h | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/llvm/include/llvm/CodeGenTypes/LowLevelType.h b/llvm/include/llvm/CodeGenTypes/LowLevelType.h
index d8e0848aff84d..d466dccd9e8df 100644
--- a/llvm/include/llvm/CodeGenTypes/LowLevelType.h
+++ b/llvm/include/llvm/CodeGenTypes/LowLevelType.h
@@ -147,6 +147,9 @@ class LLT {
   constexpr bool isScalar() const { return IsScalar; }
   constexpr bool isToken() const { return IsScalar && RawData == 0; };
   constexpr bool isVector() const { return isValid() && IsVector; }
+  constexpr bool isScalar(unsigned Size) const {
+    return isScalar() && getScalarSizeInBits() == Size;
+  }
   constexpr bool isPointer() const {
     return isValid() && IsPointer && !IsVector;
   }
@@ -177,6 +180,12 @@ class LLT {
   /// if the LLT is not a vector type.
   constexpr bool isFixedVector() const { return isVector() && !isScalable(); }
 
+  constexpr bool isFixedVector(unsigned NumElements,
+                               unsigned ScalarSize) const {
+    return isFixedVector() && getNumElements() == NumElements &&
+           getScalarSizeInBits() == ScalarSize;
+  }
+
   /// Returns true if the LLT is a scalable vector. Returns false otherwise,
   /// even if the LLT is not a vector type.
   constexpr bool isScalableVector() const { return isVector() && isScalable(); }

>From cc16885507e4d578f2c7d849428066acc826e291 Mon Sep 17 00:00:00 2001
From: Tim Gymnich <tim at gymni.ch>
Date: Thu, 6 Feb 2025 13:16:43 +0100
Subject: [PATCH 2/9] changeElementCount

---
 .../llvm/CodeGen/GlobalISel/LegalizerInfo.h        |  2 +-
 llvm/lib/CodeGen/GlobalISel/LegalizeMutations.cpp  |  6 ++----
 .../CodeGen/GlobalISel/LegalizerInfoTest.cpp       | 14 ++++++++------
 3 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
index fd72a3898562e..8412042a780e8 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
@@ -380,7 +380,7 @@ LLVM_ABI LegalizeMutation changeElementCountTo(unsigned TypeIdx,
 
 /// Keep the same scalar or element type as \p TypeIdx, but take the number of
 /// elements from \p Ty.
-LLVM_ABI LegalizeMutation changeElementCountTo(unsigned TypeIdx, LLT Ty);
+LLVM_ABI LegalizeMutation changeElementCountTo(unsigned TypeIdx, ElementCount EC);
 
 /// Change the scalar size or element size to have the same scalar size as type
 /// index \p FromIndex. Unlike changeElementTo, this discards pointer types and
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizeMutations.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizeMutations.cpp
index 25c1db91b05d8..ded4df4edc14c 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizeMutations.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizeMutations.cpp
@@ -55,12 +55,10 @@ LegalizeMutation LegalizeMutations::changeElementCountTo(unsigned TypeIdx,
 }
 
 LegalizeMutation LegalizeMutations::changeElementCountTo(unsigned TypeIdx,
-                                                         LLT NewEltTy) {
+                                                         ElementCount EC) {
   return [=](const LegalityQuery &Query) {
     const LLT OldTy = Query.Types[TypeIdx];
-    ElementCount NewEltCount = NewEltTy.isVector() ? NewEltTy.getElementCount()
-                                                   : ElementCount::getFixed(1);
-    return std::make_pair(TypeIdx, OldTy.changeElementCount(NewEltCount));
+    return std::make_pair(TypeIdx, OldTy.changeElementCount(EC));
   };
 }
 
diff --git a/llvm/unittests/CodeGen/GlobalISel/LegalizerInfoTest.cpp b/llvm/unittests/CodeGen/GlobalISel/LegalizerInfoTest.cpp
index 988e307909232..836c81b524672 100644
--- a/llvm/unittests/CodeGen/GlobalISel/LegalizerInfoTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/LegalizerInfoTest.cpp
@@ -420,12 +420,14 @@ TEST(LegalizerInfoTest, RuleSets) {
 
     // Raw type form
     LI.getActionDefinitionsBuilder(G_ADD)
-      .fewerElementsIf(typeIs(0, v4s32), changeElementCountTo(0, v2s32))
-      .fewerElementsIf(typeIs(0, v8s32), changeElementCountTo(0, s32))
-      .fewerElementsIf(typeIs(0, LLT::scalable_vector(4, 16)),
-                       changeElementCountTo(0, LLT::scalable_vector(2, 16)))
-      .fewerElementsIf(typeIs(0, LLT::scalable_vector(8, 16)),
-                       changeElementCountTo(0, s16));
+        .fewerElementsIf(typeIs(0, v4s32),
+                         changeElementCountTo(0, ElementCount::getFixed(2)))
+        .fewerElementsIf(typeIs(0, v8s32),
+                         changeElementCountTo(0, ElementCount::getFixed(1)))
+        .fewerElementsIf(typeIs(0, LLT::scalable_vector(4, s16)),
+                         changeElementCountTo(0, ElementCount::getScalable(2)))
+        .fewerElementsIf(typeIs(0, LLT::scalable_vector(8, s16)),
+                         changeElementCountTo(0, ElementCount::getFixed(1)));
 
     LegacyInfo.computeTables();
 

>From f04aed6fb3b914a563074ebe27b6efd17d993317 Mon Sep 17 00:00:00 2001
From: Tim Gymnich <tim at gymni.ch>
Date: Wed, 12 Feb 2025 13:27:53 +0100
Subject: [PATCH 3/9] FPInfo: LLT changes

---
 llvm/include/llvm/CodeGen/LowLevelTypeUtils.h |   2 +-
 llvm/include/llvm/CodeGenTypes/LowLevelType.h | 345 +++++++++++++-----
 .../CodeGen/GlobalISel/MachineIRBuilder.cpp   |   3 +-
 llvm/lib/CodeGen/LowLevelTypeUtils.cpp        |  75 +++-
 llvm/lib/CodeGen/MIRParser/MIParser.cpp       | 102 ++++--
 llvm/lib/CodeGenTypes/LowLevelType.cpp        |  60 ++-
 .../GlobalISel/GlobalISelMatchTable.cpp       | 101 +++--
 7 files changed, 490 insertions(+), 198 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/LowLevelTypeUtils.h b/llvm/include/llvm/CodeGen/LowLevelTypeUtils.h
index 51a298eb8b247..f0c1758a4c9cc 100644
--- a/llvm/include/llvm/CodeGen/LowLevelTypeUtils.h
+++ b/llvm/include/llvm/CodeGen/LowLevelTypeUtils.h
@@ -41,6 +41,6 @@ LLVM_ABI LLT getLLTForMVT(MVT Ty);
 /// Get the appropriate floating point arithmetic semantic based on the bit size
 /// of the given scalar LLT.
 LLVM_ABI const llvm::fltSemantics &getFltSemanticForLLT(LLT Ty);
-}
+} // namespace llvm
 
 #endif // LLVM_CODEGEN_LOWLEVELTYPEUTILS_H
diff --git a/llvm/include/llvm/CodeGenTypes/LowLevelType.h b/llvm/include/llvm/CodeGenTypes/LowLevelType.h
index d466dccd9e8df..32572d6753c61 100644
--- a/llvm/include/llvm/CodeGenTypes/LowLevelType.h
+++ b/llvm/include/llvm/CodeGenTypes/LowLevelType.h
@@ -30,6 +30,7 @@
 #include "llvm/CodeGenTypes/MachineValueType.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
 #include <cassert>
 
 namespace llvm {
@@ -39,68 +40,150 @@ class raw_ostream;
 
 class LLT {
 public:
+  enum class FPVariant {
+    IEEE_FLOAT = 0x0,
+    BRAIN_FLOAT = 0x1,     // BRAIN_FLOAT
+    VARIANT_FLOAT_2 = 0x2, // PPC_FLOAT
+    VARIANT_FLOAT_3 = 0x3, // FP80
+    VARIANT_FLOAT_4 = 0x4, // TENSOR_FLOAT
+    VARIANT_FLOAT_5 = 0x5, // UNASSIGNED
+    VARIANT_FLOAT_6 = 0x6, // UNASSIGNED
+    VARIANT_FLOAT_7 = 0x7, // UNASSIGNED
+  };
+
+  enum class Kind : uint64_t {
+    INVALID = 0b000,
+    INTEGER = 0b001,
+    FLOAT = 0b010,
+    POINTER = 0b011,
+    VECTOR_INTEGER = 0b101,
+    VECTOR_FLOAT = 0b110,
+    VECTOR_POINTER = 0b111,
+  };
+
+  constexpr static Kind toVector(Kind Ty) {
+    if (Ty == Kind::POINTER)
+      return Kind::VECTOR_POINTER;
+
+    if (Ty == Kind::INTEGER)
+      return Kind::VECTOR_INTEGER;
+
+    if (Ty == Kind::FLOAT)
+      return Kind::VECTOR_FLOAT;
+
+    llvm_unreachable("Type is already a vector type");
+  }
+
+  constexpr static Kind toScalar(Kind Ty) {
+    if (Ty == Kind::VECTOR_POINTER)
+      return Kind::POINTER;
+
+    if (Ty == Kind::VECTOR_INTEGER)
+      return Kind::INTEGER;
+
+    if (Ty == Kind::VECTOR_FLOAT)
+      return Kind::FLOAT;
+
+    llvm_unreachable("Type is already a scalar type");
+  }
+
   /// Get a low-level scalar or aggregate "bag of bits".
-  static constexpr LLT scalar(unsigned SizeInBits) {
-    return LLT{/*isPointer=*/false, /*isVector=*/false, /*isScalar=*/true,
-               ElementCount::getFixed(0), SizeInBits,
-               /*AddressSpace=*/0};
+  [[deprecated("Use LLT::integer(unsigned) instead.")]] static constexpr LLT
+  scalar(unsigned SizeInBits) {
+    return LLT{Kind::INTEGER, ElementCount::getFixed(0), SizeInBits,
+               /*AddressSpace=*/0, static_cast<FPVariant>(0)};
+  }
+
+  static constexpr LLT integer(unsigned SizeInBits) {
+    return LLT{Kind::INTEGER, ElementCount::getFixed(0), SizeInBits,
+               /*AddressSpace=*/0, static_cast<FPVariant>(0)};
+  }
+
+  static constexpr LLT floatingPoint(unsigned SizeInBits, FPVariant FP) {
+    return LLT{Kind::FLOAT, ElementCount::getFixed(0), SizeInBits,
+               /*AddressSpace=*/0, FP};
   }
 
   /// Get a low-level token; just a scalar with zero bits (or no size).
   static constexpr LLT token() {
-    return LLT{/*isPointer=*/false, /*isVector=*/false,
-               /*isScalar=*/true,   ElementCount::getFixed(0),
+    return LLT{Kind::INTEGER, ElementCount::getFixed(0),
                /*SizeInBits=*/0,
-               /*AddressSpace=*/0};
+               /*AddressSpace=*/0, static_cast<FPVariant>(0)};
   }
 
   /// Get a low-level pointer in the given address space.
   static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits) {
     assert(SizeInBits > 0 && "invalid pointer size");
-    return LLT{/*isPointer=*/true, /*isVector=*/false, /*isScalar=*/false,
-               ElementCount::getFixed(0), SizeInBits, AddressSpace};
+    return LLT{Kind::POINTER, ElementCount::getFixed(0), SizeInBits,
+               AddressSpace, static_cast<FPVariant>(0)};
   }
 
   /// Get a low-level vector of some number of elements and element width.
-  static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits) {
+  [[deprecated("Use LLT::vector(EC, LLT) instead.")]] static constexpr LLT
+  vector(ElementCount EC, unsigned ScalarSizeInBits) {
     assert(!EC.isScalar() && "invalid number of vector elements");
-    return LLT{/*isPointer=*/false, /*isVector=*/true, /*isScalar=*/false,
-               EC, ScalarSizeInBits, /*AddressSpace=*/0};
+    return LLT{Kind::VECTOR_INTEGER, EC, ScalarSizeInBits,
+               /*AddressSpace=*/0, static_cast<FPVariant>(0)};
   }
 
   /// Get a low-level vector of some number of elements and element type.
   static constexpr LLT vector(ElementCount EC, LLT ScalarTy) {
     assert(!EC.isScalar() && "invalid number of vector elements");
     assert(!ScalarTy.isVector() && "invalid vector element type");
-    return LLT{ScalarTy.isPointer(),
-               /*isVector=*/true,
-               /*isScalar=*/false,
-               EC,
-               ScalarTy.getSizeInBits().getFixedValue(),
-               ScalarTy.isPointer() ? ScalarTy.getAddressSpace() : 0};
+
+    Kind Info = toVector(ScalarTy.Info);
+    return LLT{Info, EC, ScalarTy.getSizeInBits().getFixedValue(),
+               ScalarTy.isPointer() ? ScalarTy.getAddressSpace() : 0,
+               ScalarTy.isFloat() ? ScalarTy.getFPVariant()
+                                  : static_cast<FPVariant>(0)};
+  }
+  /// Get an 8-bit brain float value.
+  static constexpr LLT bfloat8() {
+    return floatingPoint(8, FPVariant::BRAIN_FLOAT);
+  }
+
+  /// Get a 16-bit brain float value.
+  static constexpr LLT bfloat16() {
+    return floatingPoint(16, FPVariant::BRAIN_FLOAT);
   }
 
   /// Get a 16-bit IEEE half value.
-  /// TODO: Add IEEE semantics to type - This currently returns a simple `scalar(16)`.
   static constexpr LLT float16() {
-    return scalar(16);
+    return floatingPoint(16, FPVariant::IEEE_FLOAT);
   }
 
   /// Get a 32-bit IEEE float value.
   static constexpr LLT float32() {
-    return scalar(32);
+    return floatingPoint(32, FPVariant::IEEE_FLOAT);
   }
 
   /// Get a 64-bit IEEE double value.
   static constexpr LLT float64() {
-    return scalar(64);
+    return floatingPoint(64, FPVariant::IEEE_FLOAT);
+  }
+
+  /// Get an 80-bit X86 floating point value.
+  static constexpr LLT x86fp80() {
+    return floatingPoint(80, FPVariant::VARIANT_FLOAT_3);
+  }
+
+  /// Get a 128-bit IEEE quad value.
+  static constexpr LLT float128() {
+    return floatingPoint(128, FPVariant::IEEE_FLOAT);
+  }
+
+  /// Get a 128-bit PowerPC double double value.
+  static constexpr LLT ppcf128() {
+    return floatingPoint(128, FPVariant::VARIANT_FLOAT_2);
   }
 
   /// Get a low-level fixed-width vector of some number of elements and element
   /// width.
-  static constexpr LLT fixed_vector(unsigned NumElements,
-                                    unsigned ScalarSizeInBits) {
-    return vector(ElementCount::getFixed(NumElements), ScalarSizeInBits);
+  [[deprecated(
+      "Use LLT::fixed_vector(unsigned, LLT) instead.")]] static constexpr LLT
+  fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits) {
+    return vector(ElementCount::getFixed(NumElements),
+                  LLT::integer(ScalarSizeInBits));
   }
 
   /// Get a low-level fixed-width vector of some number of elements and element
@@ -111,9 +194,11 @@ class LLT {
 
   /// Get a low-level scalable vector of some number of elements and element
   /// width.
-  static constexpr LLT scalable_vector(unsigned MinNumElements,
-                                       unsigned ScalarSizeInBits) {
-    return vector(ElementCount::getScalable(MinNumElements), ScalarSizeInBits);
+  [[deprecated(
+      "Use LLT::scalable_vector(unsigned, LLT) instead.")]] static constexpr LLT
+  scalable_vector(unsigned MinNumElements, unsigned ScalarSizeInBits) {
+    return vector(ElementCount::getScalable(MinNumElements),
+                  LLT::integer(ScalarSizeInBits));
   }
 
   /// Get a low-level scalable vector of some number of elements and element
@@ -126,36 +211,81 @@ class LLT {
     return EC.isScalar() ? ScalarTy : LLT::vector(EC, ScalarTy);
   }
 
-  static constexpr LLT scalarOrVector(ElementCount EC, uint64_t ScalarSize) {
+  [[deprecated(
+      "Use LLT::scalarOrVector(EC, LLT) instead.")]] static constexpr LLT
+  scalarOrVector(ElementCount EC, uint64_t ScalarSize) {
     assert(ScalarSize <= std::numeric_limits<unsigned>::max() &&
            "Not enough bits in LLT to represent size");
-    return scalarOrVector(EC, LLT::scalar(static_cast<unsigned>(ScalarSize)));
+    return scalarOrVector(EC, LLT::integer(static_cast<unsigned>(ScalarSize)));
   }
 
-  explicit constexpr LLT(bool isPointer, bool isVector, bool isScalar,
-                         ElementCount EC, uint64_t SizeInBits,
-                         unsigned AddressSpace)
+  explicit constexpr LLT(Kind Info, ElementCount EC, uint64_t SizeInBits,
+                         unsigned AddressSpace, FPVariant FP)
       : LLT() {
-    init(isPointer, isVector, isScalar, EC, SizeInBits, AddressSpace);
+    init(Info, EC, SizeInBits, AddressSpace, FP);
   }
-  explicit constexpr LLT()
-      : IsScalar(false), IsPointer(false), IsVector(false), RawData(0) {}
 
   LLVM_ABI explicit LLT(MVT VT);
+  explicit constexpr LLT() : Info(static_cast<Kind>(0)), RawData(0) {}
 
-  constexpr bool isValid() const { return IsScalar || RawData != 0; }
-  constexpr bool isScalar() const { return IsScalar; }
-  constexpr bool isToken() const { return IsScalar && RawData == 0; };
-  constexpr bool isVector() const { return isValid() && IsVector; }
+  constexpr bool isValid() const { return isToken() || RawData != 0; }
+  constexpr bool isScalar() const { return Info == Kind::INTEGER || Info == Kind::FLOAT; }
   constexpr bool isScalar(unsigned Size) const {
     return isScalar() && getScalarSizeInBits() == Size;
   }
+  constexpr bool isFloat() const { return isValid() && Info == Kind::FLOAT; }
+  constexpr bool isFloat(unsigned Size) const {
+    return isFloat() && getScalarSizeInBits() == Size;
+  }
+  constexpr bool isVariantFloat() const {
+    return isFloat() && getFPVariant() != FPVariant::IEEE_FLOAT;
+  }
+  constexpr bool isVariantFloat(FPVariant Variant) const {
+    return isFloat() && getFPVariant() == Variant;
+  }
+  constexpr bool isVariantFloat(unsigned Size, FPVariant Variant) const {
+    return isVariantFloat(Variant) && getScalarSizeInBits() == Size;
+  }
+  constexpr bool isFloatVector() const {
+    return isVector() && Info == Kind::VECTOR_FLOAT;
+  }
+  constexpr bool isIEEEFloat(unsigned Size) const {
+    return isVariantFloat(Size, FPVariant::IEEE_FLOAT);
+  }
+  constexpr bool isBFloat(unsigned Size) const {
+    return isVariantFloat(Size, FPVariant::BRAIN_FLOAT);
+  }
+  constexpr bool isX86FP80() const {
+    return isVariantFloat(80, FPVariant::VARIANT_FLOAT_3);
+  }
+  constexpr bool isPPCF128() const {
+    return isVariantFloat(128, FPVariant::VARIANT_FLOAT_2);
+  }
+  constexpr bool isToken() const {
+    return Info == Kind::INTEGER && RawData == 0;
+  }
+  constexpr bool isInteger() const {
+    return isValid() && Info == Kind::INTEGER;
+  }
+  constexpr bool isInteger(unsigned Size) const {
+    return isInteger() && getScalarSizeInBits() == Size;
+  }
+  constexpr bool isIntegerVector() const {
+    return isVector() && Info == Kind::VECTOR_INTEGER;
+  }
+  constexpr bool isVector() const {
+    return isValid() &&
+           (Info == Kind::VECTOR_INTEGER || Info == Kind::VECTOR_FLOAT ||
+            Info == Kind::VECTOR_POINTER);
+  }
   constexpr bool isPointer() const {
-    return isValid() && IsPointer && !IsVector;
+    return isValid() && Info == Kind::POINTER;
+  }
+  constexpr bool isPointerVector() const {
+    return isVector() && Info == Kind::VECTOR_POINTER;
   }
-  constexpr bool isPointerVector() const { return IsPointer && isVector(); }
   constexpr bool isPointerOrPointerVector() const {
-    return IsPointer && isValid();
+    return isPointer() || isPointerVector();
   }
 
   /// Returns the number of elements in a vector LLT. Must only be called on
@@ -191,7 +321,7 @@ class LLT {
   constexpr bool isScalableVector() const { return isVector() && isScalable(); }
 
   constexpr ElementCount getElementCount() const {
-    assert(IsVector && "cannot get number of elements on scalar/aggregate");
+    assert(isVector() && "cannot get number of elements on scalar/aggregate");
     return ElementCount::get(getFieldValue(VectorElementsFieldInfo),
                              isScalable());
   }
@@ -216,6 +346,15 @@ class LLT {
     return isVector() ? getElementType() : *this;
   }
 
+  constexpr FPVariant getFPVariant() const {
+    assert((isFloat() || isFloatVector()) &&
+           "cannot get FP info for non float type");
+
+    return FPVariant(getFieldValue(FPFieldInfo));
+  }
+
+  constexpr Kind getKind() const { return Info; }
+
   /// If this type is a vector, return a vector with the same number of elements
   /// but the new element type. Otherwise, return the new element type.
   constexpr LLT changeElementType(LLT NewEltTy) const {
@@ -226,10 +365,10 @@ class LLT {
   /// but the new element size. Otherwise, return the new element type. Invalid
   /// for pointer types. For pointer types, use changeElementType.
   constexpr LLT changeElementSize(unsigned NewEltSize) const {
-    assert(!isPointerOrPointerVector() &&
+    assert(!isPointerOrPointerVector() && !(isFloat() || isFloatVector()) &&
            "invalid to directly change element size for pointers");
-    return isVector() ? LLT::vector(getElementCount(), NewEltSize)
-                      : LLT::scalar(NewEltSize);
+    return isVector() ? LLT::vector(getElementCount(), LLT::integer(NewEltSize))
+                      : LLT::integer(NewEltSize);
   }
 
   /// Return a vector or scalar with the same element type and the new element
@@ -238,6 +377,10 @@ class LLT {
     return LLT::scalarOrVector(EC, getScalarType());
   }
 
+  constexpr LLT changeElementCount(unsigned NumElements) const {
+    return changeElementCount(ElementCount::getFixed(NumElements));
+  }
+
   /// Return a type that is \p Factor times smaller. Reduces the number of
   /// elements if this is a vector, or the bitwidth for scalar/pointers. Does
   /// not attempt to handle cases that aren't evenly divisible.
@@ -252,7 +395,7 @@ class LLT {
     }
 
     assert(getScalarSizeInBits() % Factor == 0);
-    return scalar(getScalarSizeInBits() / Factor);
+    return integer(getScalarSizeInBits() / Factor);
   }
 
   /// Produce a vector type that is \p Factor times bigger, preserving the
@@ -286,10 +429,23 @@ class LLT {
   /// Returns the vector's element type. Only valid for vector types.
   constexpr LLT getElementType() const {
     assert(isVector() && "cannot get element type of scalar/aggregate");
-    if (IsPointer)
+    if (isPointerVector())
       return pointer(getAddressSpace(), getScalarSizeInBits());
-    else
-      return scalar(getScalarSizeInBits());
+
+    if (isFloatVector())
+      return floatingPoint(getScalarSizeInBits(), getFPVariant());
+
+    return integer(getScalarSizeInBits());
+  }
+
+  constexpr LLT changeToInteger() const {
+    if (isPointer() || isPointerVector())
+      return *this;
+
+    if (isVector())
+      return vector(getElementCount(), LLT::integer(getScalarSizeInBits()));
+
+    return integer(getSizeInBits());
   }
 
   LLVM_ABI void print(raw_ostream &OS) const;
@@ -299,8 +455,7 @@ class LLT {
 #endif
 
   constexpr bool operator==(const LLT &RHS) const {
-    return IsPointer == RHS.IsPointer && IsVector == RHS.IsVector &&
-           IsScalar == RHS.IsScalar && RHS.RawData == RawData;
+    return Info == RHS.Info && RawData == RHS.RawData;
   }
 
   constexpr bool operator!=(const LLT &RHS) const { return !(*this == RHS); }
@@ -310,37 +465,33 @@ class LLT {
 
 private:
   /// LLT is packed into 64 bits as follows:
-  /// isScalar : 1
-  /// isPointer : 1
-  /// isVector  : 1
-  /// with 61 bits remaining for Kind-specific data, packed in bitfields
-  /// as described below. As there isn't a simple portable way to pack bits
-  /// into bitfields, here the different fields in the packed structure is
+  /// Info : 3
+  /// RawData : 61
+  /// with 61 bits of RawData remaining for Kind-specific data, packed in
+  /// bitfields as described below. As there isn't a simple portable way to pack
+  /// bits into bitfields, here the different fields in the packed structure is
   /// described in static const *Field variables. Each of these variables
   /// is a 2-element array, with the first element describing the bitfield size
   /// and the second element describing the bitfield offset.
   ///
-  /// +--------+---------+--------+----------+----------------------+
-  /// |isScalar|isPointer|isVector| RawData  |Notes                 |
-  /// +--------+---------+--------+----------+----------------------+
-  /// |   0    |    0    |   0    |    0     |Invalid               |
-  /// +--------+---------+--------+----------+----------------------+
-  /// |   0    |    0    |   1    |    0     |Tombstone Key         |
-  /// +--------+---------+--------+----------+----------------------+
-  /// |   0    |    1    |   0    |    0     |Empty Key             |
-  /// +--------+---------+--------+----------+----------------------+
-  /// |   1    |    0    |   0    |    0     |Token                 |
-  /// +--------+---------+--------+----------+----------------------+
-  /// |   1    |    0    |   0    | non-zero |Scalar                |
-  /// +--------+---------+--------+----------+----------------------+
-  /// |   0    |    1    |   0    | non-zero |Pointer               |
-  /// +--------+---------+--------+----------+----------------------+
-  /// |   0    |    0    |   1    | non-zero |Vector of non-pointer |
-  /// +--------+---------+--------+----------+----------------------+
-  /// |   0    |    1    |   1    | non-zero |Vector of pointer     |
-  /// +--------+---------+--------+----------+----------------------+
-  ///
-  /// Everything else is reserved.
+  /*
+                                --- LLT ---
+
+   63       56       47       39       31       23       15       7      0
+   |        |        |        |        |        |        |        |      |
+  |xxxxxxxx|xxxxxxxx|xxxxxxxx|xxxxxxxx|xxxxxxxx|xxxxxxxx|xxxxxxxx|xxxxxxxx|
+   ...................................                                      (1)
+   *****************                                                        (2)
+                     ~~~~~~~~~~~~~~~~~~~~~~~~~~                             (3)
+                                                ^^^^^^^^^^^^^^^^^           (4)
+                                                                      @     (5)
+                                            ###                             (6)
+                                                                       %%%  (7)
+
+  (1) ScalarSize  (2) PointerSize  (3) PointerAddressSpace
+  (4) VectorElements  (5) VectorScalable  (6) FPVariant  (7) Kind
+
+  */
   typedef int BitFieldInfo[2];
   ///
   /// This is how the bitfields are packed per Kind:
@@ -350,6 +501,7 @@ class LLT {
   /// * Non-pointer scalar (isPointer == 0 && isVector == 0):
   ///   SizeInBits: 32;
   static const constexpr BitFieldInfo ScalarSizeFieldInfo{32, 29};
+  static const constexpr BitFieldInfo FPFieldInfo{3, 21};
   /// * Pointer (isPointer == 1 && isVector == 0):
   ///   SizeInBits: 16;
   ///   AddressSpace: 24;
@@ -367,9 +519,7 @@ class LLT {
   ///   AddressSpace: 24;
   ///   Scalable: 1;
 
-  uint64_t IsScalar : 1;
-  uint64_t IsPointer : 1;
-  uint64_t IsVector : 1;
+  Kind Info : 3;
   uint64_t RawData : 61;
 
   static constexpr uint64_t getMask(const BitFieldInfo FieldInfo) {
@@ -390,21 +540,21 @@ class LLT {
     return getMask(FieldInfo) & (RawData >> FieldInfo[1]);
   }
 
-  constexpr void init(bool IsPointer, bool IsVector, bool IsScalar,
-                      ElementCount EC, uint64_t SizeInBits,
-                      unsigned AddressSpace) {
+  constexpr void init(Kind Info, ElementCount EC, uint64_t SizeInBits,
+                      unsigned AddressSpace, FPVariant FP) {
     assert(SizeInBits <= std::numeric_limits<unsigned>::max() &&
            "Not enough bits in LLT to represent size");
-    this->IsPointer = IsPointer;
-    this->IsVector = IsVector;
-    this->IsScalar = IsScalar;
-    if (IsPointer) {
+    this->Info = Info;
+    if (Info == Kind::POINTER || Info == Kind::VECTOR_POINTER) {
       RawData = maskAndShift(SizeInBits, PointerSizeFieldInfo) |
                 maskAndShift(AddressSpace, PointerAddressSpaceFieldInfo);
     } else {
-      RawData = maskAndShift(SizeInBits, ScalarSizeFieldInfo);
+      RawData = maskAndShift(SizeInBits, ScalarSizeFieldInfo) |
+                maskAndShift((uint64_t)FP, FPFieldInfo);
     }
-    if (IsVector) {
+
+    if (Info == Kind::VECTOR_INTEGER || Info == Kind::VECTOR_FLOAT ||
+        Info == Kind::VECTOR_POINTER) {
       RawData |= maskAndShift(EC.getKnownMinValue(), VectorElementsFieldInfo) |
                  maskAndShift(EC.isScalable() ? 1 : 0, VectorScalableFieldInfo);
     }
@@ -412,8 +562,7 @@ class LLT {
 
 public:
   constexpr uint64_t getUniqueRAWLLTData() const {
-    return ((uint64_t)RawData) << 3 | ((uint64_t)IsScalar) << 2 |
-           ((uint64_t)IsPointer) << 1 | ((uint64_t)IsVector);
+    return ((uint64_t)RawData) << 3 | ((uint64_t)Info);
   }
 };
 
@@ -422,15 +571,15 @@ inline raw_ostream& operator<<(raw_ostream &OS, const LLT &Ty) {
   return OS;
 }
 
-template<> struct DenseMapInfo<LLT> {
+template <> struct DenseMapInfo<LLT> {
   static inline LLT getEmptyKey() {
     LLT Invalid;
-    Invalid.IsPointer = true;
+    Invalid.Info = static_cast<LLT::Kind>(2);
     return Invalid;
   }
   static inline LLT getTombstoneKey() {
     LLT Invalid;
-    Invalid.IsVector = true;
+    Invalid.Info = static_cast<LLT::Kind>(3);
     return Invalid;
   }
   static inline unsigned getHashValue(const LLT &Ty) {
@@ -442,6 +591,6 @@ template<> struct DenseMapInfo<LLT> {
   }
 };
 
-}
+} // namespace llvm
 
 #endif // LLVM_CODEGEN_LOWLEVELTYPE_H
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index 27df7e369436a..211809eacf4a7 100644
--- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -581,7 +581,8 @@ MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
            Op.getLLTTy(*getMRI()).getSizeInBits())
     Opcode = TargetOpcode::G_TRUNC;
   else
-    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));
+    assert(Res.getLLTTy(*getMRI()).getSizeInBits() ==
+           Op.getLLTTy(*getMRI()).getSizeInBits());
 
   return buildInstr(Opcode, Res, Op);
 }
diff --git a/llvm/lib/CodeGen/LowLevelTypeUtils.cpp b/llvm/lib/CodeGen/LowLevelTypeUtils.cpp
index 936c9fbb2fff0..cf34bf71a8c3a 100644
--- a/llvm/lib/CodeGen/LowLevelTypeUtils.cpp
+++ b/llvm/lib/CodeGen/LowLevelTypeUtils.cpp
@@ -36,7 +36,37 @@ LLT llvm::getLLTForType(Type &Ty, const DataLayout &DL) {
     // concerned.
     auto SizeInBits = DL.getTypeSizeInBits(&Ty);
     assert(SizeInBits != 0 && "invalid zero-sized type");
-    return LLT::scalar(SizeInBits);
+
+    if (Ty.isFloatingPointTy()) {
+      if (Ty.isHalfTy())
+        return LLT::float16();
+
+      if (Ty.isBFloatTy())
+        return LLT::bfloat16();
+
+      if (Ty.isFloatTy())
+        return LLT::float32();
+
+      if (Ty.isDoubleTy())
+        return LLT::float64();
+
+      if (Ty.isX86_FP80Ty())
+        return LLT::x86fp80();
+
+      if (Ty.isFP128Ty())
+        return LLT::float128();
+
+      if (Ty.isPPC_FP128Ty())
+        return LLT::ppcf128();
+
+      llvm_unreachable("Unhandled LLVM IR floating point type");
+    }
+
+    if (Ty.isIntegerTy()) {
+      return LLT::integer(SizeInBits);
+    }
+
+    return LLT::integer(SizeInBits);
   }
 
   if (Ty.isTokenTy())
@@ -46,12 +76,25 @@ LLT llvm::getLLTForType(Type &Ty, const DataLayout &DL) {
 }
 
 MVT llvm::getMVTForLLT(LLT Ty) {
-  if (!Ty.isVector())
-    return MVT::getIntegerVT(Ty.getSizeInBits());
+  if (Ty.isVector()) {
+    return MVT::getVectorVT(getMVTForLLT(Ty.getElementType()),
+                            Ty.getElementCount());
+  }
+
+  if (Ty.isFloat()) {
+    if (Ty == LLT::bfloat16())
+      return MVT::bf16;
+
+    if (Ty == LLT::x86fp80())
+      return MVT::f80;
+
+    if (Ty == LLT::ppcf128())
+      return MVT::ppcf128;
+
+    return MVT::getFloatingPointVT(Ty.getSizeInBits());
+  }
 
-  return MVT::getVectorVT(
-      MVT::getIntegerVT(Ty.getElementType().getSizeInBits()),
-      Ty.getElementCount());
+  return MVT::getIntegerVT(Ty.getSizeInBits());
 }
 
 EVT llvm::getApproximateEVTForLLT(LLT Ty, LLVMContext &Ctx) {
@@ -63,16 +106,20 @@ EVT llvm::getApproximateEVTForLLT(LLT Ty, LLVMContext &Ctx) {
   return EVT::getIntegerVT(Ctx, Ty.getSizeInBits());
 }
 
-LLT llvm::getLLTForMVT(MVT Ty) {
-  if (!Ty.isVector())
-    return LLT::scalar(Ty.getSizeInBits());
-
-  return LLT::scalarOrVector(Ty.getVectorElementCount(),
-                             Ty.getVectorElementType().getSizeInBits());
-}
+LLT llvm::getLLTForMVT(MVT Ty) { return LLT(Ty); }
 
 const llvm::fltSemantics &llvm::getFltSemanticForLLT(LLT Ty) {
-  assert(Ty.isScalar() && "Expected a scalar type.");
+  assert(Ty.isFloat() && "Expected a float type.");
+
+  if (Ty.isBFloat(16))
+    return APFloat::BFloat();
+  if (Ty.isX86FP80())
+    return APFloat::x87DoubleExtended();
+  if (Ty.isPPCF128())
+    return APFloat::PPCDoubleDouble();
+
+  assert(!Ty.isVariantFloat() && "Unhandled variant float type");
+
   switch (Ty.getSizeInBits()) {
   case 16:
     return APFloat::IEEEhalf();
diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
index 6a464d9dd6886..b0cd260fadd7e 100644
--- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -1929,26 +1929,33 @@ static bool verifyAddrSpace(uint64_t AddrSpace) {
 }
 
 bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty) {
-  if (Token.range().front() == 's' || Token.range().front() == 'p') {
-    StringRef SizeStr = Token.range().drop_front();
-    if (SizeStr.size() == 0 || !llvm::all_of(SizeStr, isdigit))
-      return error("expected integers after 's'/'p' type character");
-  }
-
-  if (Token.range().front() == 's') {
-    auto ScalarSize = APSInt(Token.range().drop_front()).getZExtValue();
-    if (ScalarSize) {
-      if (!verifyScalarSize(ScalarSize))
-        return error("invalid size for scalar type");
-      Ty = LLT::scalar(ScalarSize);
-    } else {
+  StringRef TypeDigits = Token.range();
+  if (TypeDigits.consume_front("s") || TypeDigits.consume_front("i") ||
+      TypeDigits.consume_front("f") || TypeDigits.consume_front("p") ||
+      TypeDigits.consume_front("bf")) {
+    if (TypeDigits.empty() || !llvm::all_of(TypeDigits, isdigit))
+      return error("expected integers after 's'/'i'/'f'/'bf'/'p' type prefix");
+  }
+
+  if (Token.range().starts_with("s") || Token.range().starts_with("i")) {
+    auto ScalarSize = APSInt(TypeDigits).getZExtValue();
+    if (!ScalarSize) {
       Ty = LLT::token();
+      lex();
+      return false;
     }
+
+    if (!verifyScalarSize(ScalarSize))
+      return error("invalid size for scalar type");
+
+    Ty = LLT::integer(ScalarSize);
     lex();
     return false;
-  } else if (Token.range().front() == 'p') {
+  }
+
+  if (Token.range().starts_with("p")) {
     const DataLayout &DL = MF.getDataLayout();
-    uint64_t AS = APSInt(Token.range().drop_front()).getZExtValue();
+    uint64_t AS = APSInt(TypeDigits).getZExtValue();
     if (!verifyAddrSpace(AS))
       return error("invalid address space number");
 
@@ -1957,6 +1964,23 @@ bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty) {
     return false;
   }
 
+  if (Token.range().starts_with("f") || Token.range().starts_with("bf")) {
+    LLT::FPVariant FPVariant;
+    if (Token.range().starts_with("f")) {
+      FPVariant = LLT::FPVariant::IEEE_FLOAT;
+    } else if (Token.range().starts_with("bf")) {
+      FPVariant = LLT::FPVariant::BRAIN_FLOAT;
+    } else {
+      return error("unknown floating point type identifier");
+    }
+    auto ScalarSize = APSInt(TypeDigits).getZExtValue();
+    if (!ScalarSize || !verifyScalarSize(ScalarSize))
+      return error("invalid size for scalar type");
+    Ty = LLT::floatingPoint(ScalarSize, FPVariant);
+    lex();
+    return false;
+  }
+
   // Now we're looking for a vector.
   if (Token.isNot(MIToken::less))
     return error(Loc, "expected sN, pA, <M x sN>, <M x pA>, <vscale x M x sN>, "
@@ -1991,25 +2015,39 @@ bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty) {
     return GetError();
   lex();
 
-  if (Token.range().front() != 's' && Token.range().front() != 'p')
+  StringRef VectorTyDigits = Token.range();
+  if (!VectorTyDigits.consume_front("s") &&
+      !VectorTyDigits.consume_front("i") &&
+      !VectorTyDigits.consume_front("f") &&
+      !VectorTyDigits.consume_front("p") && !VectorTyDigits.consume_front("bf"))
     return GetError();
 
-  StringRef SizeStr = Token.range().drop_front();
-  if (SizeStr.size() == 0 || !llvm::all_of(SizeStr, isdigit))
-    return error("expected integers after 's'/'p' type character");
+  if (VectorTyDigits.empty() || !llvm::all_of(VectorTyDigits, isdigit))
+    return error(
+        "expected integers after 's'/'i'/'f'/'bf'/'p' type identifier");
 
-  if (Token.range().front() == 's') {
-    auto ScalarSize = APSInt(Token.range().drop_front()).getZExtValue();
+  if (Token.range().starts_with("s") || Token.range().starts_with("i")) {
+    auto ScalarSize = APSInt(VectorTyDigits).getZExtValue();
     if (!verifyScalarSize(ScalarSize))
       return error("invalid size for scalar element in vector");
-    Ty = LLT::scalar(ScalarSize);
-  } else if (Token.range().front() == 'p') {
+    Ty = LLT::integer(ScalarSize);
+  } else if (Token.range().starts_with("p")) {
     const DataLayout &DL = MF.getDataLayout();
-    uint64_t AS = APSInt(Token.range().drop_front()).getZExtValue();
+    uint64_t AS = APSInt(VectorTyDigits).getZExtValue();
     if (!verifyAddrSpace(AS))
       return error("invalid address space number");
 
     Ty = LLT::pointer(AS, DL.getPointerSizeInBits(AS));
+  } else if (Token.range().starts_with("f")) {
+    auto ScalarSize = APSInt(VectorTyDigits).getZExtValue();
+    if (!verifyScalarSize(ScalarSize))
+      return error("invalid size for float element in vector");
+    Ty = LLT::floatingPoint(ScalarSize, LLT::FPVariant::IEEE_FLOAT);
+  } else if (Token.range().starts_with("bf")) {
+    auto ScalarSize = APSInt(VectorTyDigits).getZExtValue();
+    if (!verifyScalarSize(ScalarSize))
+      return error("invalid size for bfloat element in vector");
+    Ty = LLT::floatingPoint(ScalarSize, LLT::FPVariant::BRAIN_FLOAT);
   } else
     return GetError();
   lex();
@@ -2025,14 +2063,14 @@ bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty) {
 
 bool MIParser::parseTypedImmediateOperand(MachineOperand &Dest) {
   assert(Token.is(MIToken::Identifier));
-  StringRef TypeStr = Token.range();
-  if (TypeStr.front() != 'i' && TypeStr.front() != 's' &&
-      TypeStr.front() != 'p')
-    return error(
-        "a typed immediate operand should start with one of 'i', 's', or 'p'");
-  StringRef SizeStr = Token.range().drop_front();
-  if (SizeStr.size() == 0 || !llvm::all_of(SizeStr, isdigit))
-    return error("expected integers after 'i'/'s'/'p' type character");
+  StringRef TypeDigits = Token.range();
+  if (!TypeDigits.consume_front("i") && !TypeDigits.consume_front("s") &&
+      !TypeDigits.consume_front("p") && !TypeDigits.consume_front("f") &&
+      !TypeDigits.consume_front("bf"))
+    return error("a typed immediate operand should start with one of 'i', "
+                 "'s', 'f', 'bf', or 'p'");
+  if (TypeDigits.empty() || !llvm::all_of(TypeDigits, isdigit))
+    return error("expected integers after 'i'/'s'/'f'/'bf'/'p' type character");
 
   auto Loc = Token.location();
   lex();
diff --git a/llvm/lib/CodeGenTypes/LowLevelType.cpp b/llvm/lib/CodeGenTypes/LowLevelType.cpp
index 4785f2652b00e..925b4efaf0edf 100644
--- a/llvm/lib/CodeGenTypes/LowLevelType.cpp
+++ b/llvm/lib/CodeGenTypes/LowLevelType.cpp
@@ -16,22 +16,45 @@
 #include "llvm/Support/raw_ostream.h"
 using namespace llvm;
 
+static std::optional<LLT::FPVariant> deriveFPInfo(MVT VT) {
+  if (!VT.isFloatingPoint())
+    return std::nullopt;
+
+  switch (VT.getScalarType().SimpleTy) {
+  case MVT::bf16:
+    return LLT::FPVariant::BRAIN_FLOAT;
+  case MVT::f80:
+    return LLT::FPVariant::VARIANT_FLOAT_3;
+  case MVT::ppcf128:
+    return LLT::FPVariant::VARIANT_FLOAT_2;
+  default:
+    return LLT::FPVariant::IEEE_FLOAT;
+  }
+}
+
 LLT::LLT(MVT VT) {
+  auto FP = deriveFPInfo(VT);
+  bool AsVector = VT.isVector() &&
+                  (VT.getVectorMinNumElements() > 1 || VT.isScalableVector());
+
+  Kind Info;
+  if (FP.has_value())
+    Info = AsVector ? Kind::VECTOR_FLOAT : Kind::FLOAT;
+  else
+    Info = AsVector ? Kind::VECTOR_INTEGER : Kind::INTEGER;
+
   if (VT.isVector()) {
-    bool asVector = VT.getVectorMinNumElements() > 1 || VT.isScalableVector();
-    init(/*IsPointer=*/false, asVector, /*IsScalar=*/!asVector,
-         VT.getVectorElementCount(), VT.getVectorElementType().getSizeInBits(),
-         /*AddressSpace=*/0);
+    init(Info, VT.getVectorElementCount(),
+         VT.getVectorElementType().getSizeInBits(),
+         /*AddressSpace=*/0, FP.value_or(FPVariant::IEEE_FLOAT));
   } else if (VT.isValid() && !VT.isScalableTargetExtVT()) {
     // Aggregates are no different from real scalars as far as GlobalISel is
     // concerned.
-    init(/*IsPointer=*/false, /*IsVector=*/false, /*IsScalar=*/true,
-         ElementCount::getFixed(0), VT.getSizeInBits(), /*AddressSpace=*/0);
+    init(Info, ElementCount::getFixed(0), VT.getSizeInBits(),
+         /*AddressSpace=*/0, FP.value_or(FPVariant::IEEE_FLOAT));
   } else {
-    IsScalar = false;
-    IsPointer = false;
-    IsVector = false;
-    RawData = 0;
+    this->Info = Kind::INVALID;
+    this->RawData = 0;
   }
 }
 
@@ -39,13 +62,23 @@ void LLT::print(raw_ostream &OS) const {
   if (isVector()) {
     OS << "<";
     OS << getElementCount() << " x " << getElementType() << ">";
-  } else if (isPointer())
+  } else if (isPointer()) {
     OS << "p" << getAddressSpace();
-  else if (isValid()) {
+  } else if (isBFloat(16)) {
+    OS << "bf16";
+  } else if (isPPCF128()) {
+    OS << "ppcf128";
+  } else if (isFloat()) {
+    assert(!isVariantFloat() && "unknown float variant");
+    OS << "f" << getScalarSizeInBits();
+  } else if (isInteger()) {
+    OS << "i" << getScalarSizeInBits();
+  } else if (isValid()) {
     assert(isScalar() && "unexpected type");
     OS << "s" << getScalarSizeInBits();
-  } else
+  } else {
     OS << "LLT_invalid";
+  }
 }
 
 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -56,6 +89,7 @@ LLVM_DUMP_METHOD void LLT::dump() const {
 #endif
 
 const constexpr LLT::BitFieldInfo LLT::ScalarSizeFieldInfo;
+const constexpr LLT::BitFieldInfo LLT::FPFieldInfo;
 const constexpr LLT::BitFieldInfo LLT::PointerSizeFieldInfo;
 const constexpr LLT::BitFieldInfo LLT::PointerAddressSpaceFieldInfo;
 const constexpr LLT::BitFieldInfo LLT::VectorElementsFieldInfo;
diff --git a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp
index efaf05adc8f06..16a12cabce4c5 100644
--- a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp
+++ b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp
@@ -360,42 +360,95 @@ std::string LLTCodeGen::getCxxEnumValue() const {
 
 void LLTCodeGen::emitCxxEnumValue(raw_ostream &OS) const {
   if (Ty.isScalar()) {
-    OS << "GILLT_s" << Ty.getSizeInBits();
+    if (Ty.isBFloat(16))
+      OS << "GILLT_bf16";
+    else if (Ty.isPPCF128())
+      OS << "GILLT_ppcf128";
+    else if (Ty.isX86FP80())
+      OS << "GILLT_x86fp80";
+    else if (Ty.isFloat())
+      OS << "GILLT_f" << Ty.getSizeInBits();
+    else if (Ty.isInteger())
+      OS << "GILLT_i" << Ty.getSizeInBits();
+    else
+      OS << "GILLT_s" << Ty.getSizeInBits();
     return;
   }
   if (Ty.isVector()) {
     OS << (Ty.isScalable() ? "GILLT_nxv" : "GILLT_v")
-       << Ty.getElementCount().getKnownMinValue() << "s"
-       << Ty.getScalarSizeInBits();
+       << Ty.getElementCount().getKnownMinValue();
+
+    LLT ElemTy = Ty.getElementType();
+    if (ElemTy.isBFloat(16))
+      OS << "bf16";
+    else if (ElemTy.isPPCF128())
+      OS << "ppcf128";
+    else if (ElemTy.isX86FP80())
+      OS << "x86fp80";
+    else if (ElemTy.isFloat())
+      OS << "f" << ElemTy.getSizeInBits();
+    else if (ElemTy.isInteger())
+      OS << "i" << ElemTy.getSizeInBits();
+    else
+      OS << "s" << ElemTy.getSizeInBits();
     return;
   }
+
   if (Ty.isPointer()) {
     OS << "GILLT_p" << Ty.getAddressSpace();
     if (Ty.getSizeInBits() > 0)
       OS << "s" << Ty.getSizeInBits();
     return;
   }
+
   llvm_unreachable("Unhandled LLT");
 }
 
 void LLTCodeGen::emitCxxConstructorCall(raw_ostream &OS) const {
   if (Ty.isScalar()) {
-    OS << "LLT::scalar(" << Ty.getSizeInBits() << ")";
+    if (Ty.isInteger())
+      OS << "LLT::integer(" << Ty.getScalarSizeInBits() << ")";
+    else if (Ty.isBFloat(16))
+      OS << "LLT::bfloat()";
+    else if (Ty.isPPCF128())
+      OS << "LLT::ppcf128()";
+    else if (Ty.isX86FP80())
+      OS << "LLT::x86fp80()";
+    else if (Ty.isFloat())
+      OS << "LLT::floatingPoint(" << Ty.getScalarSizeInBits()
+         << ", LLT::FPVariant::IEEE_FLOAT)";
     return;
   }
+
   if (Ty.isVector()) {
     OS << "LLT::vector("
        << (Ty.isScalable() ? "ElementCount::getScalable("
                            : "ElementCount::getFixed(")
-       << Ty.getElementCount().getKnownMinValue() << "), "
-       << Ty.getScalarSizeInBits() << ")";
+       << Ty.getElementCount().getKnownMinValue() << "), ";
+
+    LLT ElemTy = Ty.getElementType();
+    if (ElemTy.isInteger())
+      OS << "LLT::integer(" << ElemTy.getScalarSizeInBits() << ")";
+    else if (ElemTy.isBFloat(16))
+      OS << "LLT::bfloat()";
+    else if (ElemTy.isPPCF128())
+      OS << "LLT::ppcf128()";
+    else if (ElemTy.isX86FP80())
+      OS << "LLT::x86fp80()";
+    else if (ElemTy.isFloat())
+      OS << "LLT::floatingPoint(" << ElemTy.getScalarSizeInBits()
+         << ", LLT::FPVariant::IEEE_FLOAT)";
+
+    OS << ")";
     return;
   }
+
   if (Ty.isPointer() && Ty.getSizeInBits() > 0) {
     OS << "LLT::pointer(" << Ty.getAddressSpace() << ", " << Ty.getSizeInBits()
        << ")";
     return;
   }
+
   llvm_unreachable("Unhandled LLT");
 }
 
@@ -403,36 +456,7 @@ void LLTCodeGen::emitCxxConstructorCall(raw_ostream &OS) const {
 /// particular logic behind the order but either A < B or B < A must be
 /// true if A != B.
 bool LLTCodeGen::operator<(const LLTCodeGen &Other) const {
-  if (Ty.isValid() != Other.Ty.isValid())
-    return Ty.isValid() < Other.Ty.isValid();
-  if (!Ty.isValid())
-    return false;
-
-  if (Ty.isVector() != Other.Ty.isVector())
-    return Ty.isVector() < Other.Ty.isVector();
-  if (Ty.isScalar() != Other.Ty.isScalar())
-    return Ty.isScalar() < Other.Ty.isScalar();
-  if (Ty.isPointer() != Other.Ty.isPointer())
-    return Ty.isPointer() < Other.Ty.isPointer();
-
-  if (Ty.isPointer() && Ty.getAddressSpace() != Other.Ty.getAddressSpace())
-    return Ty.getAddressSpace() < Other.Ty.getAddressSpace();
-
-  if (Ty.isVector() && Ty.getElementCount() != Other.Ty.getElementCount())
-    return std::tuple(Ty.isScalable(),
-                      Ty.getElementCount().getKnownMinValue()) <
-           std::tuple(Other.Ty.isScalable(),
-                      Other.Ty.getElementCount().getKnownMinValue());
-
-  assert((!Ty.isVector() || Ty.isScalable() == Other.Ty.isScalable()) &&
-         "Unexpected mismatch of scalable property");
-  return Ty.isVector()
-             ? std::tuple(Ty.isScalable(),
-                          Ty.getSizeInBits().getKnownMinValue()) <
-                   std::tuple(Other.Ty.isScalable(),
-                              Other.Ty.getSizeInBits().getKnownMinValue())
-             : Ty.getSizeInBits().getFixedValue() <
-                   Other.Ty.getSizeInBits().getFixedValue();
+  return Ty.getUniqueRAWLLTData() < Other.Ty.getUniqueRAWLLTData();
 }
 
 //===- LLTCodeGen Helpers -------------------------------------------------===//
@@ -441,11 +465,10 @@ std::optional<LLTCodeGen> MVTToLLT(MVT::SimpleValueType SVT) {
   MVT VT(SVT);
 
   if (VT.isVector() && !VT.getVectorElementCount().isScalar())
-    return LLTCodeGen(
-        LLT::vector(VT.getVectorElementCount(), VT.getScalarSizeInBits()));
+    return LLTCodeGen(LLT(VT));
 
   if (VT.isInteger() || VT.isFloatingPoint())
-    return LLTCodeGen(LLT::scalar(VT.getSizeInBits()));
+    return LLTCodeGen(LLT(VT));
 
   return std::nullopt;
 }

>From 79ba6eb82ba370025a4d90da8e3b4a050620e95b Mon Sep 17 00:00:00 2001
From: Denis Gerasimov <dengzmm at gmail.com>
Date: Fri, 22 Aug 2025 19:13:47 +0300
Subject: [PATCH 4/9] Return default logic for LLT::scalar. Allow target to
 enable and disable extended types for llvm-ir lowering in TargetLoweringBase.
 Added flag for GISel emitter to enable/disable extended types in matcher
 tables

---
 llvm/include/llvm/CodeGen/Analysis.h          |   3 +-
 llvm/include/llvm/CodeGen/LowLevelTypeUtils.h |   4 +-
 llvm/include/llvm/CodeGen/TargetLowering.h    |   3 +
 llvm/include/llvm/CodeGenTypes/LowLevelType.h | 134 ++++++++++--------
 llvm/include/llvm/Target/TargetMachine.h      |   1 +
 llvm/include/llvm/Target/TargetOptions.h      |   7 +-
 llvm/lib/CodeGen/Analysis.cpp                 |  10 +-
 llvm/lib/CodeGen/GlobalISel/CallLowering.cpp  |  13 +-
 llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp  |  50 +++----
 llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp  |   2 +-
 llvm/lib/CodeGen/LowLevelTypeUtils.cpp        |  22 ++-
 llvm/lib/CodeGen/MIRParser/MIParser.cpp       |  23 +--
 .../CodeGen/SelectionDAG/SelectionDAGISel.cpp |   4 +-
 llvm/lib/CodeGen/TargetLoweringBase.cpp       |   8 ++
 llvm/lib/CodeGenTypes/LowLevelType.cpp        |  35 +++--
 llvm/lib/Support/TypeSize.cpp                 |   2 +-
 .../AArch64/GISel/AArch64CallLowering.cpp     |   4 +-
 .../AArch64/GISel/AArch64RegisterBankInfo.cpp |   2 +-
 llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp |   5 +-
 .../Target/AMDGPU/AMDGPUCombinerHelper.cpp    |   8 +-
 .../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp |  17 +--
 .../GlobalISel/GlobalISelMatchTable.cpp       |  46 +++++-
 22 files changed, 255 insertions(+), 148 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/Analysis.h b/llvm/include/llvm/CodeGen/Analysis.h
index 98b52579d03b7..2aadc27930b7a 100644
--- a/llvm/include/llvm/CodeGen/Analysis.h
+++ b/llvm/include/llvm/CodeGen/Analysis.h
@@ -101,7 +101,8 @@ inline void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
 /// If Offsets is non-null, it points to a vector to be filled in
 /// with the in-memory offsets of each of the individual values.
 ///
-void computeValueLLTs(const DataLayout &DL, Type &Ty,
+void computeValueLLTs(const TargetLowering &TLI,
+                      const DataLayout &DL, Type &Ty,
                       SmallVectorImpl<LLT> &ValueTys,
                       SmallVectorImpl<uint64_t> *Offsets = nullptr,
                       uint64_t StartingOffset = 0);
diff --git a/llvm/include/llvm/CodeGen/LowLevelTypeUtils.h b/llvm/include/llvm/CodeGen/LowLevelTypeUtils.h
index f0c1758a4c9cc..1c8f34b1a8518 100644
--- a/llvm/include/llvm/CodeGen/LowLevelTypeUtils.h
+++ b/llvm/include/llvm/CodeGen/LowLevelTypeUtils.h
@@ -27,7 +27,7 @@ class Type;
 struct fltSemantics;
 
 /// Construct a low-level type based on an LLVM type.
-LLVM_ABI LLT getLLTForType(Type &Ty, const DataLayout &DL);
+LLVM_ABI LLT getLLTForType(Type &Ty, const DataLayout &DL, bool AllowExtendedLLT = false);
 
 /// Get a rough equivalent of an MVT for a given LLT. MVT can't distinguish
 /// pointers, so these will convert to a plain integer.
@@ -36,7 +36,7 @@ LLVM_ABI EVT getApproximateEVTForLLT(LLT Ty, LLVMContext &Ctx);
 
 /// Get a rough equivalent of an LLT for a given MVT. LLT does not yet support
 /// scalarable vector types, and will assert if used.
-LLVM_ABI LLT getLLTForMVT(MVT Ty);
+LLVM_ABI LLT getLLTForMVT(MVT Ty, bool AllowExtendedLLT = false);
 
 /// Get the appropriate floating point arithmetic semantic based on the bit size
 /// of the given scalar LLT.
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 4480ced637456..f4fdd8c69ea9e 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -416,6 +416,9 @@ class LLVM_ABI TargetLoweringBase {
   /// amounts, returns MVT::i32.
   EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const;
 
+  virtual LLT getLLTForType(Type &Ty, const DataLayout &DL) const;
+  virtual LLT getLLTForMVT(MVT Ty) const;
+
   /// Return the preferred type to use for a shift opcode, given the shifted
   /// amount type is \p ShiftValueTy.
   LLVM_READONLY
diff --git a/llvm/include/llvm/CodeGenTypes/LowLevelType.h b/llvm/include/llvm/CodeGenTypes/LowLevelType.h
index 32572d6753c61..daec87ab536c0 100644
--- a/llvm/include/llvm/CodeGenTypes/LowLevelType.h
+++ b/llvm/include/llvm/CodeGenTypes/LowLevelType.h
@@ -30,7 +30,6 @@
 #include "llvm/CodeGenTypes/MachineValueType.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
 #include <cassert>
 
 namespace llvm {
@@ -52,13 +51,15 @@ class LLT {
   };
 
   enum class Kind : uint64_t {
-    INVALID = 0b000,
-    INTEGER = 0b001,
-    FLOAT = 0b010,
-    POINTER = 0b011,
-    VECTOR_INTEGER = 0b101,
-    VECTOR_FLOAT = 0b110,
-    VECTOR_POINTER = 0b111,
+    INVALID = 0b0000,
+    ANY_SCALAR = 0b0001,
+    INTEGER = 0b0010,
+    FLOAT = 0b0011,
+    POINTER = 0b0100,
+    VECTOR_ANY = 0b0101,
+    VECTOR_INTEGER = 0b0110,
+    VECTOR_FLOAT = 0b0111,
+    VECTOR_POINTER = 0b1000,
   };
 
   constexpr static Kind toVector(Kind Ty) {
@@ -71,7 +72,7 @@ class LLT {
     if (Ty == Kind::FLOAT)
       return Kind::VECTOR_FLOAT;
 
-    llvm_unreachable("Type is already a vector type");
+    return Kind::VECTOR_ANY;
   }
 
   constexpr static Kind toScalar(Kind Ty) {
@@ -84,13 +85,12 @@ class LLT {
     if (Ty == Kind::VECTOR_FLOAT)
       return Kind::FLOAT;
 
-    llvm_unreachable("Type is already a scalar type");
+    return Kind::ANY_SCALAR;
   }
 
   /// Get a low-level scalar or aggregate "bag of bits".
-  [[deprecated("Use LLT::integer(unsigned) instead.")]] static constexpr LLT
-  scalar(unsigned SizeInBits) {
-    return LLT{Kind::INTEGER, ElementCount::getFixed(0), SizeInBits,
+  static constexpr LLT scalar(unsigned SizeInBits) {
+    return LLT{Kind::ANY_SCALAR, ElementCount::getFixed(0), SizeInBits,
                /*AddressSpace=*/0, static_cast<FPVariant>(0)};
   }
 
@@ -106,7 +106,7 @@ class LLT {
 
   /// Get a low-level token; just a scalar with zero bits (or no size).
   static constexpr LLT token() {
-    return LLT{Kind::INTEGER, ElementCount::getFixed(0),
+    return LLT{Kind::ANY_SCALAR, ElementCount::getFixed(0),
                /*SizeInBits=*/0,
                /*AddressSpace=*/0, static_cast<FPVariant>(0)};
   }
@@ -119,10 +119,9 @@ class LLT {
   }
 
   /// Get a low-level vector of some number of elements and element width.
-  [[deprecated("Use LLT::vector(EC, LLT) instead.")]] static constexpr LLT
-  vector(ElementCount EC, unsigned ScalarSizeInBits) {
+  static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits) {
     assert(!EC.isScalar() && "invalid number of vector elements");
-    return LLT{Kind::VECTOR_INTEGER, EC, ScalarSizeInBits,
+    return LLT{Kind::VECTOR_ANY, EC, ScalarSizeInBits,
                /*AddressSpace=*/0, static_cast<FPVariant>(0)};
   }
 
@@ -179,11 +178,9 @@ class LLT {
 
   /// Get a low-level fixed-width vector of some number of elements and element
   /// width.
-  [[deprecated(
-      "Use LLT::fixed_vector(unsigned, LLT) instead.")]] static constexpr LLT
-  fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits) {
+  static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits) {
     return vector(ElementCount::getFixed(NumElements),
-                  LLT::integer(ScalarSizeInBits));
+                  LLT::scalar(ScalarSizeInBits));
   }
 
   /// Get a low-level fixed-width vector of some number of elements and element
@@ -194,11 +191,9 @@ class LLT {
 
   /// Get a low-level scalable vector of some number of elements and element
   /// width.
-  [[deprecated(
-      "Use LLT::scalable_vector(unsigned, LLT) instead.")]] static constexpr LLT
-  scalable_vector(unsigned MinNumElements, unsigned ScalarSizeInBits) {
+  static constexpr LLT scalable_vector(unsigned MinNumElements, unsigned ScalarSizeInBits) {
     return vector(ElementCount::getScalable(MinNumElements),
-                  LLT::integer(ScalarSizeInBits));
+                  LLT::scalar(ScalarSizeInBits));
   }
 
   /// Get a low-level scalable vector of some number of elements and element
@@ -211,12 +206,10 @@ class LLT {
     return EC.isScalar() ? ScalarTy : LLT::vector(EC, ScalarTy);
   }
 
-  [[deprecated(
-      "Use LLT::scalarOrVector(EC, LLT) instead.")]] static constexpr LLT
-  scalarOrVector(ElementCount EC, uint64_t ScalarSize) {
+  static constexpr LLT scalarOrVector(ElementCount EC, uint64_t ScalarSize) {
     assert(ScalarSize <= std::numeric_limits<unsigned>::max() &&
            "Not enough bits in LLT to represent size");
-    return scalarOrVector(EC, LLT::integer(static_cast<unsigned>(ScalarSize)));
+    return scalarOrVector(EC, LLT::scalar(static_cast<unsigned>(ScalarSize)));
   }
 
   explicit constexpr LLT(Kind Info, ElementCount EC, uint64_t SizeInBits,
@@ -225,11 +218,11 @@ class LLT {
     init(Info, EC, SizeInBits, AddressSpace, FP);
   }
 
-  LLVM_ABI explicit LLT(MVT VT);
+  LLVM_ABI explicit LLT(MVT VT, bool AllowExtendedLLT = false);
   explicit constexpr LLT() : Info(static_cast<Kind>(0)), RawData(0) {}
 
   constexpr bool isValid() const { return isToken() || RawData != 0; }
-  constexpr bool isScalar() const { return Info == Kind::INTEGER || Info == Kind::FLOAT; }
+  constexpr bool isScalar() const { return Info == Kind::ANY_SCALAR || Info == Kind::INTEGER || Info == Kind::FLOAT; }
   constexpr bool isScalar(unsigned Size) const {
     return isScalar() && getScalarSizeInBits() == Size;
   }
@@ -262,7 +255,13 @@ class LLT {
     return isVariantFloat(128, FPVariant::VARIANT_FLOAT_2);
   }
   constexpr bool isToken() const {
-    return Info == Kind::INTEGER && RawData == 0;
+    return Info == Kind::ANY_SCALAR && RawData == 0;
+  }
+  constexpr bool isAnyScalar() const {
+    return isValid() && Info == Kind::ANY_SCALAR;
+  }
+  constexpr bool isVectorAny() const {
+    return isVector() && Info == Kind::VECTOR_ANY;
   }
   constexpr bool isInteger() const {
     return isValid() && Info == Kind::INTEGER;
@@ -275,8 +274,8 @@ class LLT {
   }
   constexpr bool isVector() const {
     return isValid() &&
-           (Info == Kind::VECTOR_INTEGER || Info == Kind::VECTOR_FLOAT ||
-            Info == Kind::VECTOR_POINTER);
+           (Info == Kind::VECTOR_ANY || Info == Kind::VECTOR_INTEGER ||
+            Info == Kind::VECTOR_FLOAT || Info == Kind::VECTOR_POINTER);
   }
   constexpr bool isPointer() const {
     return isValid() && Info == Kind::POINTER;
@@ -367,8 +366,12 @@ class LLT {
   constexpr LLT changeElementSize(unsigned NewEltSize) const {
     assert(!isPointerOrPointerVector() && !(isFloat() || isFloatVector()) &&
            "invalid to directly change element size for pointers");
-    return isVector() ? LLT::vector(getElementCount(), LLT::integer(NewEltSize))
-                      : LLT::integer(NewEltSize);
+    return isVector()
+               ? LLT::vector(getElementCount(), getElementType().isInteger()
+                                                    ? LLT::integer(NewEltSize)
+                                                    : LLT::scalar(NewEltSize))
+           : isInteger() ? LLT::integer(NewEltSize)
+                         : LLT::scalar(NewEltSize);
   }
 
   /// Return a vector or scalar with the same element type and the new element
@@ -395,7 +398,10 @@ class LLT {
     }
 
     assert(getScalarSizeInBits() % Factor == 0);
-    return integer(getScalarSizeInBits() / Factor);
+    if (isInteger())
+      return integer(getScalarSizeInBits() / Factor);
+
+    return scalar(getScalarSizeInBits() / Factor);
   }
 
   /// Produce a vector type that is \p Factor times bigger, preserving the
@@ -435,7 +441,10 @@ class LLT {
     if (isFloatVector())
       return floatingPoint(getScalarSizeInBits(), getFPVariant());
 
-    return integer(getScalarSizeInBits());
+    if (isIntegerVector())
+      return integer(getScalarSizeInBits());
+
+    return scalar(getScalarSizeInBits());
   }
 
   constexpr LLT changeToInteger() const {
@@ -455,6 +464,13 @@ class LLT {
 #endif
 
   constexpr bool operator==(const LLT &RHS) const {
+    if (isAnyScalar() || RHS.isAnyScalar()) {
+      return isScalar() == RHS.isScalar() && RawData == RHS.RawData;
+    }
+    if (isVector() && RHS.isVector()) {
+      return getElementType() == RHS.getElementType() &&
+             getElementCount() == RHS.getElementCount();
+    }
     return Info == RHS.Info && RawData == RHS.RawData;
   }
 
@@ -465,8 +481,8 @@ class LLT {
 
 private:
   /// LLT is packed into 64 bits as follows:
-  /// Info : 3
-  /// RawData : 61
+  /// Info : 4
+  /// RawData : 60
   /// with 61 bits of RawData remaining for Kind-specific data, packed in
   /// bitfields as described below. As there isn't a simple portable way to pack
   /// bits into bitfields, here the different fields in the packed structure is
@@ -480,13 +496,13 @@ class LLT {
    63       56       47       39       31       23       15       7      0
    |        |        |        |        |        |        |        |      |
   |xxxxxxxx|xxxxxxxx|xxxxxxxx|xxxxxxxx|xxxxxxxx|xxxxxxxx|xxxxxxxx|xxxxxxxx|
-   ...................................                                      (1)
-   *****************                                                        (2)
-                     ~~~~~~~~~~~~~~~~~~~~~~~~~~                             (3)
-                                                ^^^^^^^^^^^^^^^^^           (4)
-                                                                      @     (5)
+   ........ ........ ........ ........                                      (1)
+   ******** ********                                                        (2)
+                     ~~~~~~~~ ~~~~~~~~ ~~~~~~~~                             (3)
+                                                ^^^^^^^^ ^^^^^^^^           (4)
+                                                                     @      (5)
                                             ###                             (6)
-                                                                       %%%  (7)
+                                                                      %%%%  (7)
 
   (1) ScalarSize  (2) PointerSize  (3) PointerAddressSpace
   (4) VectorElements  (5) VectorScalable  (6) FPVariant  (7) Kind
@@ -500,18 +516,20 @@ class LLT {
   ///   valid encodings, SizeInBits/SizeOfElement must be larger than 0.
   /// * Non-pointer scalar (isPointer == 0 && isVector == 0):
   ///   SizeInBits: 32;
-  static const constexpr BitFieldInfo ScalarSizeFieldInfo{32, 29};
-  static const constexpr BitFieldInfo FPFieldInfo{3, 21};
+  ///   FPInfoField: 3;
+  static const constexpr BitFieldInfo ScalarSizeFieldInfo{32, 28};
+  static const constexpr BitFieldInfo FPFieldInfo{3, 20};
   /// * Pointer (isPointer == 1 && isVector == 0):
   ///   SizeInBits: 16;
   ///   AddressSpace: 24;
-  static const constexpr BitFieldInfo PointerSizeFieldInfo{16, 45};
-  static const constexpr BitFieldInfo PointerAddressSpaceFieldInfo{24, 21};
+  static const constexpr BitFieldInfo PointerSizeFieldInfo{16, 44};
+  static const constexpr BitFieldInfo PointerAddressSpaceFieldInfo{24, 20};
   /// * Vector-of-non-pointer (isPointer == 0 && isVector == 1):
   ///   NumElements: 16;
   ///   SizeOfElement: 32;
+  ///   FPInfoField: 3;
   ///   Scalable: 1;
-  static const constexpr BitFieldInfo VectorElementsFieldInfo{16, 5};
+  static const constexpr BitFieldInfo VectorElementsFieldInfo{16, 4};
   static const constexpr BitFieldInfo VectorScalableFieldInfo{1, 0};
   /// * Vector-of-pointer (isPointer == 1 && isVector == 1):
   ///   NumElements: 16;
@@ -519,8 +537,8 @@ class LLT {
   ///   AddressSpace: 24;
   ///   Scalable: 1;
 
-  Kind Info : 3;
-  uint64_t RawData : 61;
+  Kind Info : 4;
+  uint64_t RawData : 60;
 
   static constexpr uint64_t getMask(const BitFieldInfo FieldInfo) {
     const int FieldSizeInBits = FieldInfo[0];
@@ -553,8 +571,8 @@ class LLT {
                 maskAndShift((uint64_t)FP, FPFieldInfo);
     }
 
-    if (Info == Kind::VECTOR_INTEGER || Info == Kind::VECTOR_FLOAT ||
-        Info == Kind::VECTOR_POINTER) {
+    if (Info == Kind::VECTOR_ANY || Info == Kind::VECTOR_INTEGER ||
+        Info == Kind::VECTOR_FLOAT || Info == Kind::VECTOR_POINTER) {
       RawData |= maskAndShift(EC.getKnownMinValue(), VectorElementsFieldInfo) |
                  maskAndShift(EC.isScalable() ? 1 : 0, VectorScalableFieldInfo);
     }
@@ -562,7 +580,7 @@ class LLT {
 
 public:
   constexpr uint64_t getUniqueRAWLLTData() const {
-    return ((uint64_t)RawData) << 3 | ((uint64_t)Info);
+    return ((uint64_t)RawData) << 4 | ((uint64_t)Info);
   }
 };
 
@@ -574,12 +592,12 @@ inline raw_ostream& operator<<(raw_ostream &OS, const LLT &Ty) {
 template <> struct DenseMapInfo<LLT> {
   static inline LLT getEmptyKey() {
     LLT Invalid;
-    Invalid.Info = static_cast<LLT::Kind>(2);
+    Invalid.Info = LLT::Kind::POINTER;
     return Invalid;
   }
   static inline LLT getTombstoneKey() {
     LLT Invalid;
-    Invalid.Info = static_cast<LLT::Kind>(3);
+    Invalid.Info = LLT::Kind::VECTOR_ANY;
     return Invalid;
   }
   static inline unsigned getHashValue(const LLT &Ty) {
diff --git a/llvm/include/llvm/Target/TargetMachine.h b/llvm/include/llvm/Target/TargetMachine.h
index bf4e490554723..5671318a8586d 100644
--- a/llvm/include/llvm/Target/TargetMachine.h
+++ b/llvm/include/llvm/Target/TargetMachine.h
@@ -295,6 +295,7 @@ class LLVM_ABI TargetMachine {
   bool getO0WantsFastISel() { return O0WantsFastISel; }
   void setO0WantsFastISel(bool Enable) { O0WantsFastISel = Enable; }
   void setGlobalISel(bool Enable) { Options.EnableGlobalISel = Enable; }
+  void setGlobalISelExtendedLLT(bool Enable) { Options.EnableGlobalISelExtendedLLT = Enable; }
   void setGlobalISelAbort(GlobalISelAbortMode Mode) {
     Options.GlobalISelAbort = Mode;
   }
diff --git a/llvm/include/llvm/Target/TargetOptions.h b/llvm/include/llvm/Target/TargetOptions.h
index db90f2e4cc7cc..839412cd151bf 100644
--- a/llvm/include/llvm/Target/TargetOptions.h
+++ b/llvm/include/llvm/Target/TargetOptions.h
@@ -123,7 +123,8 @@ class TargetOptions {
         ApproxFuncFPMath(false), EnableAIXExtendedAltivecABI(false),
         HonorSignDependentRoundingFPMathOption(false), NoZerosInBSS(false),
         GuaranteedTailCallOpt(false), StackSymbolOrdering(true),
-        EnableFastISel(false), EnableGlobalISel(false), UseInitArray(false),
+        EnableFastISel(false), EnableGlobalISel(false),
+        EnableGlobalISelExtendedLLT(false), UseInitArray(false),
         DisableIntegratedAS(false), FunctionSections(false),
         DataSections(false), IgnoreXCOFFVisibility(false),
         XCOFFTracebackTable(true), UniqueSectionNames(true),
@@ -235,6 +236,10 @@ class TargetOptions {
   /// EnableGlobalISel - This flag enables global instruction selection.
   unsigned EnableGlobalISel : 1;
 
+  /// EnableGlobalISelExtendedLLT - This flag enables LLTs with extended type
+  /// info so targets may distinguish different formats of equal-sized scalars.
+  unsigned EnableGlobalISelExtendedLLT : 1;
+
   /// EnableGlobalISelAbort - Control abort behaviour when global instruction
   /// selection fails to lower/select an instruction.
   GlobalISelAbortMode GlobalISelAbort = GlobalISelAbortMode::Enable;
diff --git a/llvm/lib/CodeGen/Analysis.cpp b/llvm/lib/CodeGen/Analysis.cpp
index 2ef96cc4400f7..61d509db7f14a 100644
--- a/llvm/lib/CodeGen/Analysis.cpp
+++ b/llvm/lib/CodeGen/Analysis.cpp
@@ -146,8 +146,8 @@ void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
   }
 }
 
-void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
-                            SmallVectorImpl<LLT> &ValueTys,
+void llvm::computeValueLLTs(const TargetLowering &TLI, const DataLayout &DL,
+                            Type &Ty, SmallVectorImpl<LLT> &ValueTys,
                             SmallVectorImpl<uint64_t> *Offsets,
                             uint64_t StartingOffset) {
   // Given a struct type, recursively traverse the elements.
@@ -158,7 +158,7 @@ void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
     const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
     for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
       uint64_t EltOffset = SL ? SL->getElementOffset(I) : 0;
-      computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
+      computeValueLLTs(TLI, DL, *STy->getElementType(I), ValueTys, Offsets,
                        StartingOffset + EltOffset);
     }
     return;
@@ -168,7 +168,7 @@ void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
     Type *EltTy = ATy->getElementType();
     uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
-      computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
+      computeValueLLTs(TLI, DL, *EltTy, ValueTys, Offsets,
                        StartingOffset + i * EltSize);
     return;
   }
@@ -176,7 +176,7 @@ void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
   if (Ty.isVoidTy())
     return;
   // Base case: we can get an LLT for this LLVM IR type.
-  ValueTys.push_back(getLLTForType(Ty, DL));
+  ValueTys.push_back(TLI.getLLTForType(Ty, DL));
   if (Offsets != nullptr)
     Offsets->push_back(StartingOffset * 8);
 }
diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
index 90a18b86c1b1f..41609dae1b156 100644
--- a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -159,7 +159,7 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
 
   if (const Function *F = dyn_cast<Function>(CalleeV)) {
     if (F->hasFnAttribute(Attribute::NonLazyBind)) {
-      LLT Ty = getLLTForType(*F->getType(), DL);
+      LLT Ty = TLI->getLLTForType(*F->getType(), DL);
       Register Reg = MIRBuilder.buildGlobalValue(Ty, F).getReg(0);
       Info.Callee = MachineOperand::CreateReg(Reg, false);
     } else {
@@ -781,11 +781,11 @@ bool CallLowering::handleAssignments(ValueHandler &Handler,
     const MVT ValVT = VA.getValVT();
     const MVT LocVT = VA.getLocVT();
 
-    const LLT LocTy(LocVT);
-    const LLT ValTy(ValVT);
+    const LLT LocTy = TLI->getLLTForMVT(LocVT);
+    const LLT ValTy = TLI->getLLTForMVT(ValVT);
     const LLT NewLLT = Handler.isIncomingArgumentHandler() ? LocTy : ValTy;
     const EVT OrigVT = EVT::getEVT(Args[i].Ty);
-    const LLT OrigTy = getLLTForType(*Args[i].Ty, DL);
+    const LLT OrigTy = TLI->getLLTForType(*Args[i].Ty, DL);
     const LLT PointerTy = LLT::pointer(
         AllocaAddressSpace, DL.getPointerSizeInBits(AllocaAddressSpace));
 
@@ -1004,7 +1004,7 @@ void CallLowering::insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy,
   Align BaseAlign = DL.getPrefTypeAlign(RetTy);
   Type *RetPtrTy =
       PointerType::get(RetTy->getContext(), DL.getAllocaAddrSpace());
-  LLT OffsetLLTy = getLLTForType(*DL.getIndexType(RetPtrTy), DL);
+  LLT OffsetLLTy = TLI->getLLTForType(*DL.getIndexType(RetPtrTy), DL);
 
   MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
 
@@ -1035,7 +1035,8 @@ void CallLowering::insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy,
   unsigned NumValues = SplitVTs.size();
   Align BaseAlign = DL.getPrefTypeAlign(RetTy);
   unsigned AS = DL.getAllocaAddrSpace();
-  LLT OffsetLLTy = getLLTForType(*DL.getIndexType(RetTy->getContext(), AS), DL);
+  LLT OffsetLLTy =
+      TLI->getLLTForType(*DL.getIndexType(RetTy->getContext(), AS), DL);
 
   MachinePointerInfo PtrInfo(AS);
 
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 8424a8108d76e..4484a0769ad2a 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -193,7 +193,7 @@ IRTranslator::allocateVRegs(const Value &Val) {
   auto *Regs = VMap.getVRegs(Val);
   auto *Offsets = VMap.getOffsets(Val);
   SmallVector<LLT, 4> SplitTys;
-  computeValueLLTs(*DL, *Val.getType(), SplitTys,
+  computeValueLLTs(*TLI, *DL, *Val.getType(), SplitTys,
                    Offsets->empty() ? Offsets : nullptr);
   for (unsigned i = 0; i < SplitTys.size(); ++i)
     Regs->push_back(0);
@@ -217,7 +217,7 @@ ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
            "Don't know how to create an empty vreg");
 
   SmallVector<LLT, 4> SplitTys;
-  computeValueLLTs(*DL, *Val.getType(), SplitTys,
+  computeValueLLTs(*TLI, *DL, *Val.getType(), SplitTys,
                    Offsets->empty() ? Offsets : nullptr);
 
   if (!isa<Constant>(Val)) {
@@ -858,7 +858,7 @@ void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
   MIB.setDebugLoc(CurBuilder->getDebugLoc());
 
   Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
-  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
+  const LLT PtrTy = TLI->getLLTForType(*PtrIRTy, *DL);
 
   auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
   MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
@@ -873,7 +873,7 @@ bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
 
   const Value &SValue = *JTH.SValue;
   // Subtract the lowest switch case value from the value being switched on.
-  const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
+  const LLT SwitchTy = TLI->getLLTForType(*SValue.getType(), *DL);
   Register SwitchOpReg = getOrCreateVReg(SValue);
   auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
   auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);
@@ -1106,7 +1106,7 @@ void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
   auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);
 
   Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
-  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
+  const LLT PtrTy = TLI->getLLTForType(*PtrIRTy, *DL);
 
   LLT MaskTy = SwitchOpTy;
   if (MaskTy.getSizeInBits() > PtrTy.getSizeInBits() ||
@@ -1159,7 +1159,7 @@ void IRTranslator::emitBitTestCase(SwitchCG::BitTestBlock &BB,
   MachineIRBuilder &MIB = *CurBuilder;
   MIB.setMBB(*SwitchBB);
 
-  LLT SwitchTy = getLLTForMVT(BB.RegVT);
+  LLT SwitchTy = TLI->getLLTForMVT(BB.RegVT);
   Register Cmp;
   unsigned PopCount = llvm::popcount(B.Mask);
   if (PopCount == 1) {
@@ -1386,7 +1386,7 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
 
   const Value *Ptr = LI.getPointerOperand();
   Type *OffsetIRTy = DL->getIndexType(Ptr->getType());
-  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
+  LLT OffsetTy = TLI->getLLTForType(*OffsetIRTy, *DL);
 
   if (CLI->supportSwiftError() && isSwiftError(Ptr)) {
     assert(Regs.size() == 1 && "swifterror should be single pointer");
@@ -1433,7 +1433,7 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
   Register Base = getOrCreateVReg(*SI.getPointerOperand());
 
   Type *OffsetIRTy = DL->getIndexType(SI.getPointerOperandType());
-  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
+  LLT OffsetTy = TLI->getLLTForType(*OffsetIRTy, *DL);
 
   if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
     assert(Vals.size() == 1 && "swifterror should be single pointer");
@@ -1555,8 +1555,8 @@ bool IRTranslator::translateCopy(const User &U, const Value &V,
 bool IRTranslator::translateBitCast(const User &U,
                                     MachineIRBuilder &MIRBuilder) {
   // If we're bitcasting to the source type, we can reuse the source vreg.
-  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
-      getLLTForType(*U.getType(), *DL)) {
+  if (TLI->getLLTForType(*U.getOperand(0)->getType(), *DL) ==
+      TLI->getLLTForType(*U.getType(), *DL)) {
     // If the source is a ConstantInt then it was probably created by
     // ConstantHoisting and we should leave it alone.
     if (isa<ConstantInt>(U.getOperand(0)))
@@ -1588,9 +1588,9 @@ bool IRTranslator::translateGetElementPtr(const User &U,
   Value &Op0 = *U.getOperand(0);
   Register BaseReg = getOrCreateVReg(Op0);
   Type *PtrIRTy = Op0.getType();
-  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
+  LLT PtrTy = TLI->getLLTForType(*PtrIRTy, *DL);
   Type *OffsetIRTy = DL->getIndexType(PtrIRTy);
-  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
+  LLT OffsetTy = TLI->getLLTForType(*OffsetIRTy, *DL);
 
   uint32_t PtrAddFlags = 0;
   // Each PtrAdd generated to implement the GEP inherits its nuw, nusw, inbounds
@@ -1627,9 +1627,9 @@ bool IRTranslator::translateGetElementPtr(const User &U,
                                          BaseReg)
                   .getReg(0);
     PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth);
-    PtrTy = getLLTForType(*PtrIRTy, *DL);
+    PtrTy = TLI->getLLTForType(*PtrIRTy, *DL);
     OffsetIRTy = DL->getIndexType(PtrIRTy);
-    OffsetTy = getLLTForType(*OffsetIRTy, *DL);
+    OffsetTy = TLI->getLLTForType(*OffsetIRTy, *DL);
   }
 
   int64_t Offset = 0;
@@ -1679,7 +1679,7 @@ bool IRTranslator::translateGetElementPtr(const User &U,
       Register GepOffsetReg;
       if (ElementSize != 1) {
         auto ElementSizeMIB = MIRBuilder.buildConstant(
-            getLLTForType(*OffsetIRTy, *DL), ElementSize);
+            TLI->getLLTForType(*OffsetIRTy, *DL), ElementSize);
 
         // The multiplication is NUW if the GEP is NUW and NSW if the GEP is
         // NUSW.
@@ -2336,7 +2336,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
       MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
                           MachineInstr::copyFlagsFromInstruction(CI));
     } else {
-      LLT Ty = getLLTForType(*CI.getType(), *DL);
+      LLT Ty = TLI->getLLTForType(*CI.getType(), *DL);
       auto FMul = MIRBuilder.buildFMul(
           Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI));
       MIRBuilder.buildFAdd(Dst, FMul, Op2,
@@ -2403,7 +2403,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
     getStackGuard(getOrCreateVReg(CI), MIRBuilder);
     return true;
   case Intrinsic::stackprotector: {
-    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
+    LLT PtrTy = TLI->getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
     Register GuardVal;
     if (TLI->useLoadStackGuardNode(*CI.getModule())) {
       GuardVal = MRI->createGenericVirtualRegister(PtrTy);
@@ -2650,7 +2650,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
   case Intrinsic::vector_deinterleave2: {
     // Both intrinsics have at least one operand.
     Value *Op0 = CI.getOperand(0);
-    LLT ResTy = getLLTForType(*Op0->getType(), MIRBuilder.getDataLayout());
+    LLT ResTy = TLI->getLLTForType(*Op0->getType(), MIRBuilder.getDataLayout());
     if (!ResTy.isFixedVector())
       return false;
 
@@ -2700,7 +2700,7 @@ bool IRTranslator::translateCallBase(const CallBase &CB,
   for (const auto &Arg : CB.args()) {
     if (CLI->supportSwiftError() && isSwiftError(Arg)) {
       assert(SwiftInVReg == 0 && "Expected only one swift error argument");
-      LLT Ty = getLLTForType(*Arg->getType(), *DL);
+      LLT Ty = TLI->getLLTForType(*Arg->getType(), *DL);
       SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
       MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
                                             &CB, &MIRBuilder.getMBB(), Arg));
@@ -2852,7 +2852,7 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
     Align Alignment = Info.align.value_or(
         DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));
     LLT MemTy = Info.memVT.isSimple()
-                    ? getLLTForMVT(Info.memVT.getSimpleVT())
+                    ? TLI->getLLTForMVT(Info.memVT.getSimpleVT())
                     : LLT::scalar(Info.memVT.getStoreSizeInBits());
 
     // TODO: We currently just fallback to address space 0 if getTgtMemIntrinsic
@@ -3060,13 +3060,13 @@ bool IRTranslator::translateLandingPad(const User &U,
   if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
     MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);
 
-  LLT Ty = getLLTForType(*LP.getType(), *DL);
+  LLT Ty = TLI->getLLTForType(*LP.getType(), *DL);
   Register Undef = MRI->createGenericVirtualRegister(Ty);
   MIRBuilder.buildUndef(Undef);
 
   SmallVector<LLT, 2> Tys;
   for (Type *Ty : cast<StructType>(LP.getType())->elements())
-    Tys.push_back(getLLTForType(*Ty, *DL));
+    Tys.push_back(TLI->getLLTForType(*Ty, *DL));
   assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
 
   // Mark exception register as live in.
@@ -3111,7 +3111,7 @@ bool IRTranslator::translateAlloca(const User &U,
   // Now we're in the harder dynamic case.
   Register NumElts = getOrCreateVReg(*AI.getArraySize());
   Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
-  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
+  LLT IntPtrTy = TLI->getLLTForType(*IntPtrIRTy, *DL);
   if (MRI->getType(NumElts) != IntPtrTy) {
     Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
     MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
@@ -3854,8 +3854,8 @@ bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
   CurBuilder->setInsertPt(*ParentBB, ParentBB->end());
   // First create the loads to the guard/stack slot for the comparison.
   Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
-  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
-  LLT PtrMemTy = getLLTForMVT(TLI->getPointerMemTy(*DL));
+  const LLT PtrTy = TLI->getLLTForType(*PtrIRTy, *DL);
+  LLT PtrMemTy = TLI->getLLTForMVT(TLI->getPointerMemTy(*DL));
 
   MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
   int FI = MFI.getStackProtectorIndex();
diff --git a/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp b/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp
index b2f84351c9a95..b1e134ce5ef9e 100644
--- a/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp
@@ -952,7 +952,7 @@ void LoadStoreOpt::initializeStoreMergeTargetInfo(unsigned AddrSpace) {
   const auto &LI = *MF->getSubtarget().getLegalizerInfo();
   const auto &DL = MF->getFunction().getDataLayout();
   Type *IRPtrTy = PointerType::get(MF->getFunction().getContext(), AddrSpace);
-  LLT PtrTy = getLLTForType(*IRPtrTy, DL);
+  LLT PtrTy = TLI->getLLTForType(*IRPtrTy, DL);
   // We assume that we're not going to be generating any stores wider than
   // MaxStoreSizeToForm bits for now.
   for (unsigned Size = 2; Size <= MaxStoreSizeToForm; Size *= 2) {
diff --git a/llvm/lib/CodeGen/LowLevelTypeUtils.cpp b/llvm/lib/CodeGen/LowLevelTypeUtils.cpp
index cf34bf71a8c3a..6a15b1748630d 100644
--- a/llvm/lib/CodeGen/LowLevelTypeUtils.cpp
+++ b/llvm/lib/CodeGen/LowLevelTypeUtils.cpp
@@ -17,16 +17,16 @@
 #include "llvm/IR/DerivedTypes.h"
 using namespace llvm;
 
-LLT llvm::getLLTForType(Type &Ty, const DataLayout &DL) {
-  if (auto VTy = dyn_cast<VectorType>(&Ty)) {
+LLT llvm::getLLTForType(Type &Ty, const DataLayout &DL, bool AllowExtendedLLT) {
+  if (auto *VTy = dyn_cast<VectorType>(&Ty)) {
     auto EC = VTy->getElementCount();
-    LLT ScalarTy = getLLTForType(*VTy->getElementType(), DL);
+    LLT ScalarTy = getLLTForType(*VTy->getElementType(), DL, AllowExtendedLLT);
     if (EC.isScalar())
       return ScalarTy;
     return LLT::vector(EC, ScalarTy);
   }
 
-  if (auto PTy = dyn_cast<PointerType>(&Ty)) {
+  if (auto *PTy = dyn_cast<PointerType>(&Ty)) {
     unsigned AddrSpace = PTy->getAddressSpace();
     return LLT::pointer(AddrSpace, DL.getPointerSizeInBits(AddrSpace));
   }
@@ -37,6 +37,11 @@ LLT llvm::getLLTForType(Type &Ty, const DataLayout &DL) {
     auto SizeInBits = DL.getTypeSizeInBits(&Ty);
     assert(SizeInBits != 0 && "invalid zero-sized type");
 
+    // Return simple scalar
+    if (!AllowExtendedLLT)
+      return LLT::scalar(SizeInBits);
+
+    // Choose more precise LLT variant
     if (Ty.isFloatingPointTy()) {
       if (Ty.isHalfTy())
         return LLT::float16();
@@ -66,7 +71,7 @@ LLT llvm::getLLTForType(Type &Ty, const DataLayout &DL) {
       return LLT::integer(SizeInBits);
     }
 
-    return LLT::integer(SizeInBits);
+    return LLT::scalar(SizeInBits);
   }
 
   if (Ty.isTokenTy())
@@ -106,10 +111,13 @@ EVT llvm::getApproximateEVTForLLT(LLT Ty, LLVMContext &Ctx) {
   return EVT::getIntegerVT(Ctx, Ty.getSizeInBits());
 }
 
-LLT llvm::getLLTForMVT(MVT Ty) { return LLT(Ty); }
+LLT llvm::getLLTForMVT(MVT VT, bool AllowExtendedLLT) {
+  return LLT(VT, AllowExtendedLLT);
+}
 
 const llvm::fltSemantics &llvm::getFltSemanticForLLT(LLT Ty) {
-  assert(Ty.isFloat() && "Expected a scalar type.");
+  assert((Ty.isAnyScalar() || Ty.isFloat()) &&
+         "Expected an any-scalar or float type.");
 
   if (Ty.isBFloat(16))
     return APFloat::BFloat();
diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
index b0cd260fadd7e..699f4d3529b6e 100644
--- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -1934,10 +1934,11 @@ bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty) {
       TypeDigits.consume_front("f") || TypeDigits.consume_front("p") ||
       TypeDigits.consume_front("bf")) {
     if (TypeDigits.empty() || !llvm::all_of(TypeDigits, isdigit))
-      return error("expected integers after 's'/'i'/'f'/'bf'/'p' type prefix");
+      return error("expected integers after 's'/'i'/'f'/'bf'/'p' type identifier");
   }
 
   if (Token.range().starts_with("s") || Token.range().starts_with("i")) {
+    bool ScalarOrInt = Token.range().starts_with("s");
     auto ScalarSize = APSInt(TypeDigits).getZExtValue();
     if (!ScalarSize) {
       Ty = LLT::token();
@@ -1948,7 +1949,7 @@ bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty) {
     if (!verifyScalarSize(ScalarSize))
       return error("invalid size for scalar type");
 
-    Ty = LLT::integer(ScalarSize);
+    Ty = ScalarOrInt ? LLT::scalar(ScalarSize) : LLT::integer(ScalarSize);
     lex();
     return false;
   }
@@ -1983,8 +1984,9 @@ bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty) {
 
   // Now we're looking for a vector.
   if (Token.isNot(MIToken::less))
-    return error(Loc, "expected sN, pA, <M x sN>, <M x pA>, <vscale x M x sN>, "
-                      "or <vscale x M x pA> for GlobalISel type");
+    return error(Loc, "expected tN, pA, <M x tN>, <M x pA>, <vscale x M x tN>, "
+                      "or <vscale x M x pA> for GlobalISel type, "
+                      "where t = {'s', 'i', 'f', 'bf'}");
   lex();
 
   bool HasVScale =
@@ -1992,15 +1994,15 @@ bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty) {
   if (HasVScale) {
     lex();
     if (Token.isNot(MIToken::Identifier) || Token.stringValue() != "x")
-      return error("expected <vscale x M x sN> or <vscale x M x pA>");
+      return error("expected <vscale x M x tN>, where t = {'s', 'i', 'f', 'bf', 'p'}");
     lex();
   }
 
   auto GetError = [this, &HasVScale, Loc]() {
     if (HasVScale)
       return error(
-          Loc, "expected <vscale x M x sN> or <vscale M x pA> for vector type");
-    return error(Loc, "expected <M x sN> or <M x pA> for vector type");
+          Loc, "expected <vscale x M x tN> for vector type, where t = {'s', 'i', 'f', 'bf', 'p'}");
+    return error(Loc, "expected <M x tN> for vector type, where t = {'s', 'i', 'f', 'bf', 'p'}");
   };
 
   if (Token.isNot(MIToken::IntegerLiteral))
@@ -2027,10 +2029,11 @@ bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty) {
         "expected integers after 's'/'i'/'f'/'bf'/'p' type identifier");
 
   if (Token.range().starts_with("s") || Token.range().starts_with("i")) {
+    bool ScalarOrInt = Token.range().starts_with("s");
     auto ScalarSize = APSInt(VectorTyDigits).getZExtValue();
     if (!verifyScalarSize(ScalarSize))
       return error("invalid size for scalar element in vector");
-    Ty = LLT::integer(ScalarSize);
+    Ty = ScalarOrInt ? LLT::scalar(ScalarSize) : LLT::integer(ScalarSize);
   } else if (Token.range().starts_with("p")) {
     const DataLayout &DL = MF.getDataLayout();
     uint64_t AS = APSInt(VectorTyDigits).getZExtValue();
@@ -2068,9 +2071,9 @@ bool MIParser::parseTypedImmediateOperand(MachineOperand &Dest) {
       !TypeDigits.consume_front("p") && !TypeDigits.consume_front("f") &&
       !TypeDigits.consume_front("bf"))
     return error("a typed immediate operand should start with one of 'i', "
-                 "'s','f','bf', or 'p'");
+                 "'s', 'f', 'bf', or 'p'");
   if (TypeDigits.empty() || !llvm::all_of(TypeDigits, isdigit))
-    return error("expected integers after 'i'/'s'/'f'/'bf'/'p' type character");
+    return error("expected integers after 'i'/'s'/'f'/'bf'/'p' type identifier");
 
   auto Loc = Token.location();
   lex();
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index ece50ed95fc49..ca75c42ee0a72 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -2457,7 +2457,7 @@ void SelectionDAGISel::Select_READ_REGISTER(SDNode *Op) {
   const MDString *RegStr = cast<MDString>(MD->getMD()->getOperand(0));
 
   EVT VT = Op->getValueType(0);
-  LLT Ty = VT.isSimple() ? getLLTForMVT(VT.getSimpleVT()) : LLT();
+  LLT Ty = VT.isSimple() ? TLI->getLLTForMVT(VT.getSimpleVT()) : LLT();
 
   const MachineFunction &MF = CurDAG->getMachineFunction();
   Register Reg = TLI->getRegisterByName(RegStr->getString().data(), Ty, MF);
@@ -2488,7 +2488,7 @@ void SelectionDAGISel::Select_WRITE_REGISTER(SDNode *Op) {
   const MDString *RegStr = cast<MDString>(MD->getMD()->getOperand(0));
 
   EVT VT = Op->getOperand(2).getValueType();
-  LLT Ty = VT.isSimple() ? getLLTForMVT(VT.getSimpleVT()) : LLT();
+  LLT Ty = VT.isSimple() ? TLI->getLLTForMVT(VT.getSimpleVT()) : LLT();
 
   const MachineFunction &MF = CurDAG->getMachineFunction();
   Register Reg = TLI->getRegisterByName(RegStr->getString().data(), Ty, MF);
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 350948a92a3ae..ee95f9ea05c02 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -978,6 +978,14 @@ EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy,
   return ShiftVT;
 }
 
+LLT TargetLoweringBase::getLLTForType(Type &Ty, const DataLayout &DL) const {
+  return llvm::getLLTForType(Ty, DL, TM.Options.EnableGlobalISelExtendedLLT);
+}
+
+LLT TargetLoweringBase::getLLTForMVT(MVT Ty) const {
+  return llvm::getLLTForMVT(Ty, TM.Options.EnableGlobalISelExtendedLLT);
+}
+
 bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
   assert(isTypeLegal(VT));
   switch (Op) {
diff --git a/llvm/lib/CodeGenTypes/LowLevelType.cpp b/llvm/lib/CodeGenTypes/LowLevelType.cpp
index 925b4efaf0edf..35329793bd508 100644
--- a/llvm/lib/CodeGenTypes/LowLevelType.cpp
+++ b/llvm/lib/CodeGenTypes/LowLevelType.cpp
@@ -32,30 +32,47 @@ static std::optional<LLT::FPVariant> deriveFPInfo(MVT VT) {
   }
 }
 
-LLT::LLT(MVT VT) {
+LLT::LLT(MVT VT, bool AllowExtendedLLT) {
+  if (!AllowExtendedLLT) {
+    if (VT.isVector()) {
+      bool AsVector = VT.getVectorMinNumElements() > 1 || VT.isScalableVector();
+      Kind Info = AsVector ? Kind::VECTOR_ANY : Kind::ANY_SCALAR;
+      init(Info, VT.getVectorElementCount(),
+           VT.getVectorElementType().getSizeInBits(), 0, FPVariant::IEEE_FLOAT);
+    } else if (VT.isValid() && !VT.isScalableTargetExtVT()) {
+      init(Kind::ANY_SCALAR, ElementCount::getFixed(0), VT.getSizeInBits(), 0,
+           FPVariant::IEEE_FLOAT);
+    } else {
+      this->Info = Kind::INVALID;
+      this->RawData = 0;
+    }
+    return;
+  }
+
   auto FP = deriveFPInfo(VT);
   bool AsVector = VT.isVector() &&
                   (VT.getVectorMinNumElements() > 1 || VT.isScalableVector());
 
-  Kind Info;
+  LLT::Kind Info;
   if (FP.has_value())
-    Info = AsVector ? Kind::VECTOR_FLOAT : Kind::FLOAT;
+    Info = AsVector ? LLT::Kind::VECTOR_FLOAT : LLT::Kind::FLOAT;
   else
-    Info = AsVector ? Kind::VECTOR_INTEGER : Kind::INTEGER;
+    Info = AsVector ? LLT::Kind::VECTOR_INTEGER : LLT::Kind::INTEGER;
 
   if (VT.isVector()) {
     init(Info, VT.getVectorElementCount(),
-         VT.getVectorElementType().getSizeInBits(),
-         /*AddressSpace=*/0, FP.value_or(FPVariant::IEEE_FLOAT));
+         VT.getVectorElementType().getSizeInBits(), 0,
+         FP.value_or(LLT::FPVariant::IEEE_FLOAT));
   } else if (VT.isValid() && !VT.isScalableTargetExtVT()) {
     // Aggregates are no different from real scalars as far as GlobalISel is
     // concerned.
-    init(Info, ElementCount::getFixed(0), VT.getSizeInBits(),
-         /*AddressSpace=*/0, FP.value_or(FPVariant::IEEE_FLOAT));
+    init(Info, ElementCount::getFixed(0), VT.getSizeInBits(), 0,
+         FP.value_or(LLT::FPVariant::IEEE_FLOAT));
   } else {
-    this->Info = static_cast<Kind>(0);
+    this->Info = Kind::INVALID;
     this->RawData = 0;
   }
+  return;
 }
 
 void LLT::print(raw_ostream &OS) const {
diff --git a/llvm/lib/Support/TypeSize.cpp b/llvm/lib/Support/TypeSize.cpp
index 43346b81cd676..a96fced8eff30 100644
--- a/llvm/lib/Support/TypeSize.cpp
+++ b/llvm/lib/Support/TypeSize.cpp
@@ -44,7 +44,7 @@ void llvm::reportInvalidSizeRequest(const char *Msg) {
     return;
   }
 #endif
-  report_fatal_error("Invalid size request on a scalable vector.");
+  report_fatal_error(Twine("Invalid size request on a scalable vector. ") + Msg);
 }
 
 TypeSize::operator TypeSize::ScalarTy() const {
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
index 79bef76cf4c4f..c897a0f65084f 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
@@ -457,7 +457,7 @@ bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
             ExtendOp = TargetOpcode::G_ZEXT;
 
           LLT NewLLT(NewVT);
-          LLT OldLLT = getLLTForType(*CurArgInfo.Ty, DL);
+          LLT OldLLT = TLI.getLLTForType(*CurArgInfo.Ty, DL);
           CurArgInfo.Ty = EVT(NewVT).getTypeForEVT(Ctx);
           // Instead of an extend, we might have a vector type which needs
           // padding with more elements, e.g. <2 x half> -> <4 x half>.
@@ -1397,7 +1397,7 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
     // is set.
     if (Info.Callee.isSymbol() && F.getParent()->getRtLibUseGOT()) {
       auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_GLOBAL_VALUE);
-      DstOp(getLLTForType(*F.getType(), DL)).addDefToMIB(MRI, MIB);
+      DstOp(TLI.getLLTForType(*F.getType(), DL)).addDefToMIB(MRI, MIB);
       MIB.addExternalSymbol(Info.Callee.getSymbolName(), AArch64II::MO_GOT);
       Info.Callee = MachineOperand::CreateReg(MIB.getReg(0), false);
     }
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
index 31954e7954c03..f4ab0190d223f 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
@@ -569,7 +569,7 @@ bool AArch64RegisterBankInfo::onlyUsesFP(const MachineInstr &MI,
     case Intrinsic::aarch64_neon_fcvtpu:
       // Force FPR register bank for half types, as those types otherwise
       // don't get legalized correctly resulting in fp16 <-> gpr32 COPY's.
-      return MRI.getType(MI.getOperand(2).getReg()) == LLT::float16();
+      return MRI.getType(MI.getOperand(2).getReg()) == LLT::scalar(16); // TODO: Expected LLT::float16()
     default:
       break;
     }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index d1a5b4e85da46..b5ef1a87a5abb 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -332,7 +332,7 @@ bool AMDGPUCallLowering::lowerReturnVal(MachineIRBuilder &B,
                                           extOpcodeToISDExtOpcode(ExtendOp));
       if (ExtVT != VT) {
         RetInfo.Ty = ExtVT.getTypeForEVT(Ctx);
-        LLT ExtTy = getLLTForType(*RetInfo.Ty, DL);
+        LLT ExtTy = TLI.getLLTForType(*RetInfo.Ty, DL);
         Reg = B.buildInstr(ExtendOp, {ExtTy}, {Reg}).getReg(0);
       }
     }
@@ -414,6 +414,7 @@ void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &B, ArgInfo &OrigArg,
   MachineFunction &MF = B.getMF();
   const Function &F = MF.getFunction();
   const DataLayout &DL = F.getDataLayout();
+  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
   MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
 
   LLT PtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
@@ -427,7 +428,7 @@ void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &B, ArgInfo &OrigArg,
     Register PtrReg = B.getMRI()->createGenericVirtualRegister(PtrTy);
     lowerParameterPtr(PtrReg, B, Offset + FieldOffsets[Idx]);
 
-    LLT ArgTy = getLLTForType(*SplitArg.Ty, DL);
+    LLT ArgTy = TLI.getLLTForType(*SplitArg.Ty, DL);
     if (SplitArg.Flags[0].isPointer()) {
       // Compensate for losing pointeriness in splitValueTypes.
       LLT PtrTy = LLT::pointer(SplitArg.Flags[0].getPointerAddrSpace(),
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp
index d23521c87e202..853f55278c667 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp
@@ -464,8 +464,9 @@ bool AMDGPUCombinerHelper::matchCombineFmulWithSelectToFldexp(
   LLT DestTy = MRI.getType(Dst);
   LLT ScalarDestTy = DestTy.getScalarType();
 
-  if ((ScalarDestTy != LLT::float64() && ScalarDestTy != LLT::float32() &&
-       ScalarDestTy != LLT::float16()) ||
+  // TODO: Expected float type in ScalarDestTy
+  if ((ScalarDestTy != LLT::scalar(64) && ScalarDestTy != LLT::scalar(32) &&
+       ScalarDestTy != LLT::scalar(16)) ||
       !MRI.hasOneNonDBGUse(Sel.getOperand(0).getReg()))
     return false;
 
@@ -486,7 +487,8 @@ bool AMDGPUCombinerHelper::matchCombineFmulWithSelectToFldexp(
     return false;
 
   // For f32, only non-inline constants should be transformed.
-  if (ScalarDestTy == LLT::float32() && TII.isInlineConstant(*SelectTrueVal) &&
+  // TODO: Expected float32
+  if (ScalarDestTy == LLT::scalar(32) && TII.isInlineConstant(*SelectTrueVal) &&
       TII.isInlineConstant(*SelectFalseVal))
     return false;
 
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 600a13096f55d..ccc01ddfce777 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -295,9 +295,9 @@ constexpr LLT S1 = LLT::scalar(1);
 constexpr LLT S8 = LLT::scalar(8);
 constexpr LLT S16 = LLT::scalar(16);
 constexpr LLT S32 = LLT::scalar(32);
-constexpr LLT F32 = LLT::float32();
+constexpr LLT F32 = LLT::scalar(32); // TODO: Expected float32
 constexpr LLT S64 = LLT::scalar(64);
-constexpr LLT F64 = LLT::float64();
+constexpr LLT F64 = LLT::scalar(64); // TODO: Expected float64
 constexpr LLT S96 = LLT::scalar(96);
 constexpr LLT S128 = LLT::scalar(128);
 constexpr LLT S160 = LLT::scalar(160);
@@ -317,7 +317,7 @@ constexpr LLT V10S16 = LLT::fixed_vector(10, 16);
 constexpr LLT V12S16 = LLT::fixed_vector(12, 16);
 constexpr LLT V16S16 = LLT::fixed_vector(16, 16);
 
-constexpr LLT V2F16 = LLT::fixed_vector(2, LLT::float16());
+constexpr LLT V2F16 = LLT::fixed_vector(2, LLT::scalar(16)); // TODO: Expected float16
 constexpr LLT V2BF16 = V2F16; // FIXME
 
 constexpr LLT V2S32 = LLT::fixed_vector(2, 32);
@@ -3344,11 +3344,12 @@ bool AMDGPULegalizerInfo::legalizeFMad(
   const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
 
   // TODO: Always legal with future ftz flag.
+  // TODO: Type is expected to be float32/float16
   // FIXME: Do we need just output?
-  if (Ty == LLT::float32() &&
+  if (Ty == LLT::scalar(32) &&
       MFI->getMode().FP32Denormals == DenormalMode::getPreserveSign())
     return true;
-  if (Ty == LLT::float16() &&
+  if (Ty == LLT::scalar(16) &&
       MFI->getMode().FP64FP16Denormals == DenormalMode::getPreserveSign())
     return true;
 
@@ -3900,8 +3901,8 @@ bool AMDGPULegalizerInfo::legalizeFPow(MachineInstr &MI,
   Register Src1 = MI.getOperand(2).getReg();
   unsigned Flags = MI.getFlags();
   LLT Ty = B.getMRI()->getType(Dst);
-  const LLT F16 = LLT::float16();
-  const LLT F32 = LLT::float32();
+  const LLT F16 = LLT::scalar(16); // TODO: Expected float16
+  const LLT F32 = LLT::scalar(32); // TODO: Expected float32
 
   if (Ty == F32) {
     auto Log = B.buildFLog2(F32, Src0, Flags);
@@ -3944,7 +3945,7 @@ bool AMDGPULegalizerInfo::legalizeFFloor(MachineInstr &MI,
                                          MachineIRBuilder &B) const {
 
   const LLT S1 = LLT::scalar(1);
-  const LLT F64 = LLT::float64();
+  const LLT F64 = LLT::scalar(64); // TODO: Expected float64
   Register Dst = MI.getOperand(0).getReg();
   Register OrigSrc = MI.getOperand(1).getReg();
   unsigned Flags = MI.getFlags();
diff --git a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp
index 16a12cabce4c5..4af29cee0a3fe 100644
--- a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp
+++ b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp
@@ -10,6 +10,7 @@
 #include "Common/CodeGenInstruction.h"
 #include "Common/CodeGenRegisters.h"
 #include "llvm/ADT/Statistic.h"
+#include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/LEB128.h"
 #include "llvm/Support/ScopedPrinter.h"
@@ -20,6 +21,11 @@
 
 STATISTIC(NumPatternEmitted, "Number of patterns emitted");
 
+static llvm::cl::opt<bool> AllowExtendedLLT(
+    "gisel-extended-llt",
+    llvm::cl::desc("Generate extended LLT names in match tables"),
+    llvm::cl::init(false));
+
 namespace llvm {
 namespace gi {
 
@@ -417,6 +423,8 @@ void LLTCodeGen::emitCxxConstructorCall(raw_ostream &OS) const {
     else if (Ty.isFloat())
       OS << "LLT::floatingPoint(" << Ty.getScalarSizeInBits()
          << ", LLT::FPVariant::IEEE_FLOAT)";
+    else
+      OS << "LLT::scalar(" << Ty.getScalarSizeInBits() << ")";
     return;
   }
 
@@ -438,7 +446,8 @@ void LLTCodeGen::emitCxxConstructorCall(raw_ostream &OS) const {
     else if (ElemTy.isFloat())
       OS << "LLT::floatingPoint(" << ElemTy.getScalarSizeInBits()
          << ", LLT::FPVariant::IEEE_FLOAT)";
-
+    else
+      OS << "LLT::scalar(" << Ty.getScalarSizeInBits() << ")";
     OS << ")";
     return;
   }
@@ -456,7 +465,36 @@ void LLTCodeGen::emitCxxConstructorCall(raw_ostream &OS) const {
 /// particular logic behind the order but either A < B or B < A must be
 /// true if A != B.
 bool LLTCodeGen::operator<(const LLTCodeGen &Other) const {
-  return Ty.getUniqueRAWLLTData() < Other.Ty.getUniqueRAWLLTData();
+  if (Ty.isValid() != Other.Ty.isValid())
+    return Ty.isValid() < Other.Ty.isValid();
+  if (!Ty.isValid())
+    return false;
+
+  if (Ty.isVector() != Other.Ty.isVector())
+    return Ty.isVector() < Other.Ty.isVector();
+  if (Ty.isScalar() != Other.Ty.isScalar())
+    return Ty.isScalar() < Other.Ty.isScalar();
+  if (Ty.isPointer() != Other.Ty.isPointer())
+    return Ty.isPointer() < Other.Ty.isPointer();
+
+  if (Ty.isPointer() && Ty.getAddressSpace() != Other.Ty.getAddressSpace())
+    return Ty.getAddressSpace() < Other.Ty.getAddressSpace();
+
+  if (Ty.isVector() && Ty.getElementCount() != Other.Ty.getElementCount())
+    return std::tuple(Ty.isScalable(),
+                      Ty.getElementCount().getKnownMinValue()) <
+           std::tuple(Other.Ty.isScalable(),
+                      Other.Ty.getElementCount().getKnownMinValue());
+
+  assert((!Ty.isVector() || Ty.isScalable() == Other.Ty.isScalable()) &&
+         "Unexpected mismatch of scalable property");
+  return Ty.isVector()
+             ? std::tuple(Ty.isScalable(),
+                          Ty.getSizeInBits().getKnownMinValue()) <
+                   std::tuple(Other.Ty.isScalable(),
+                              Other.Ty.getSizeInBits().getKnownMinValue())
+             : Ty.getSizeInBits().getFixedValue() <
+                   Other.Ty.getSizeInBits().getFixedValue();
 }
 
 //===- LLTCodeGen Helpers -------------------------------------------------===//
@@ -465,10 +503,10 @@ std::optional<LLTCodeGen> MVTToLLT(MVT::SimpleValueType SVT) {
   MVT VT(SVT);
 
   if (VT.isVector() && !VT.getVectorElementCount().isScalar())
-    return LLTCodeGen(LLT(VT));
+    return LLTCodeGen(LLT(VT, AllowExtendedLLT));
 
   if (VT.isInteger() || VT.isFloatingPoint())
-    return LLTCodeGen(LLT(VT));
+    return LLTCodeGen(LLT(VT, AllowExtendedLLT));
 
   return std::nullopt;
 }

>From bc4b0ec59137b7effc2acdecabcbd0b179d64b35 Mon Sep 17 00:00:00 2001
From: Denis Gerasimov <dengzmm at gmail.com>
Date: Fri, 22 Aug 2025 19:14:47 +0300
Subject: [PATCH 5/9] Fix MIR tests

---
 .../CodeGen/MIR/AArch64/parse-low-level-type-invalid0.mir   | 4 ++--
 .../CodeGen/MIR/AArch64/parse-low-level-type-invalid1.mir   | 4 ++--
 .../CodeGen/MIR/AArch64/parse-low-level-type-invalid2.mir   | 2 +-
 .../CodeGen/MIR/AArch64/parse-low-level-type-invalid3.mir   | 6 +++---
 llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err0.mir | 2 +-
 llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err1.mir | 2 +-
 .../test/CodeGen/MIR/Generic/scalable-vector-type-err10.mir | 2 +-
 .../test/CodeGen/MIR/Generic/scalable-vector-type-err11.mir | 2 +-
 .../test/CodeGen/MIR/Generic/scalable-vector-type-err12.mir | 2 +-
 .../test/CodeGen/MIR/Generic/scalable-vector-type-err13.mir | 2 +-
 .../test/CodeGen/MIR/Generic/scalable-vector-type-err14.mir | 2 +-
 .../test/CodeGen/MIR/Generic/scalable-vector-type-err15.mir | 2 +-
 llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err2.mir | 2 +-
 llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err3.mir | 2 +-
 llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err4.mir | 2 +-
 llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err5.mir | 2 +-
 llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err6.mir | 2 +-
 llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err7.mir | 2 +-
 llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err8.mir | 2 +-
 llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err9.mir | 2 +-
 .../MIR/WebAssembly/typed-immediate-operand-invalid0.mir    | 4 ++--
 .../MIR/WebAssembly/typed-immediate-operand-invalid1.mir    | 4 ++--
 22 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid0.mir b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid0.mir
index cece3601dc1b2..95fd669a5d971 100644
--- a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid0.mir
+++ b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid0.mir
@@ -1,10 +1,10 @@
 # RUN: not llc -mtriple=aarch64-- -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
-# When a low-level type is only a single 's'/'p' character
+# When a low-level type is only a single 's'/'i'/'f'/'bf'/'p' type identifier
 ---
 name: test_low_level_type_is_single_s_p
 body: |
   bb.0:
     liveins: $x0
-    ; CHECK: [[@LINE+1]]:10: expected integers after 's'/'p' type character
+    ; CHECK: [[@LINE+1]]:10: expected integers after 's'/'i'/'f'/'bf'/'p' type identifier
     %0:_(s) = COPY $x0
 ...
diff --git a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid1.mir b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid1.mir
index 4a7b68dab623a..dd0bb73a6cf42 100644
--- a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid1.mir
+++ b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid1.mir
@@ -5,6 +5,6 @@ name: test_low_level_type_does_not_start_with_s_p_lt
 body: |
   bb.0:
     liveins: $x0
-    ; CHECK: [[@LINE+1]]:10: expected sN, pA, <M x sN>, <M x pA>, <vscale x M x sN>, or <vscale x M x pA> for GlobalISel type
-    %0:_(i64) = COPY $x0
+    ; CHECK: [[@LINE+1]]:10: expected tN, pA, <M x tN>, <M x pA>, <vscale x M x tN>, or <vscale x M x pA> for GlobalISel type, where t = {'s', 'i', 'f', 'bf'}
+    %0:_(n64) = COPY $x0
 ...
diff --git a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid2.mir b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid2.mir
index 1bff7a5ec9ced..6277d24aacab3 100644
--- a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid2.mir
+++ b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid2.mir
@@ -5,6 +5,6 @@ name: test_low_level_type_is_single_s_p
 body: |
   bb.0:
     liveins: $q0
-    ; CHECK: [[@LINE+1]]:15: expected integers after 's'/'p' type character
+    ; CHECK: [[@LINE+1]]:15: expected integers after 's'/'i'/'f'/'bf'/'p' type identifier
     %0:_(<2 x p>) = COPY $q0
 ...
diff --git a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid3.mir b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid3.mir
index ebb3d37f9dfa1..ba652329c5337 100644
--- a/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid3.mir
+++ b/llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid3.mir
@@ -1,10 +1,10 @@
 # RUN: not llc -mtriple=aarch64-- -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
-# When a low-level type is a vector which element type does not start with 's' or 'p'
+# When a low-level type is a vector whose element type does not start with a valid type identifier
 ---
 name: test_low_level_type_does_not_start_with_s_p
 body: |
   bb.0:
     liveins: $q0
-    ; CHECK: [[@LINE+1]]:10: expected <M x sN> or <M x pA> for vector type
-    %0:_(<2 x i64>) = COPY $q0
+    ; CHECK: [[@LINE+1]]:10: expected <M x tN> for vector type, where t = {'s', 'i', 'f', 'bf', 'p'}
+    %0:_(<2 x n64>) = COPY $q0
 ...
diff --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err0.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err0.mir
index 5553d97acd003..6fc6640ea422a 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err0.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err0.mir
@@ -6,5 +6,5 @@ body: |
     %0:_(<vscale) = IMPLICIT_DEF
 ...
 
-# CHECK: expected <vscale x M x sN> or <vscale x M x pA>
+# CHECK: expected <vscale x M x tN>, where t = {'s', 'i', 'f', 'bf', 'p'}
 
diff --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err1.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err1.mir
index 12bfb82ebcd12..0017e01be3b04 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err1.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err1.mir
@@ -6,4 +6,4 @@ body: |
     %0:_(<vscale notanx) = IMPLICIT_DEF
 ...
 
-# CHECK: expected <vscale x M x sN> or <vscale x M x pA>
+# CHECK: expected <vscale x M x tN>, where t = {'s', 'i', 'f', 'bf', 'p'}
diff --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err10.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err10.mir
index 7d7d7e49f23fe..37a9a0d2c159b 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err10.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err10.mir
@@ -7,4 +7,4 @@ body: |
     %0:_(<vscale x 4 x p) = IMPLICIT_DEF
 ...
 
-# CHECK: expected integers after 's'/'p' type character
+# CHECK: expected integers after 's'/'i'/'f'/'bf'/'p' type identifier
diff --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err11.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err11.mir
index f8927c22ab45f..64296f624826e 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err11.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err11.mir
@@ -7,4 +7,4 @@ body: |
     %0:_(<vscale x 4 x s32) = IMPLICIT_DEF
 ...
 
-# CHECK: expected <vscale x M x sN> or <vscale M x pA> for vector type
+# CHECK: expected <vscale x M x tN> for vector type, where t = {'s', 'i', 'f', 'bf', 'p'}
diff --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err12.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err12.mir
index 5ced1aea30c08..9c840d92ac967 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err12.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err12.mir
@@ -7,4 +7,4 @@ body: |
     %0:_(<vscale x 4 x p0) = IMPLICIT_DEF
 ...
 
-# CHECK: expected <vscale x M x sN> or <vscale M x pA> for vector type
+# CHECK: expected <vscale x M x tN> for vector type, where t = {'s', 'i', 'f', 'bf', 'p'}
diff --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err13.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err13.mir
index 94b8230233fa6..982adfdf792c2 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err13.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err13.mir
@@ -7,4 +7,4 @@ body: |
     %0:_(<vscale x 4 x s32 X) = IMPLICIT_DEF
 ...
 
-# CHECK: expected <vscale x M x sN> or <vscale M x pA> for vector type
+# CHECK: expected <vscale x M x tN> for vector type, where t = {'s', 'i', 'f', 'bf', 'p'}
diff --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err14.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err14.mir
index 323e2d975692f..8a3e86ecaaab9 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err14.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err14.mir
@@ -7,4 +7,4 @@ body: |
     %0:_(<vscale x 4 x p0 X) = IMPLICIT_DEF
 ...
 
-# CHECK: expected <vscale x M x sN> or <vscale M x pA> for vector type
+# CHECK: expected <vscale x M x tN> for vector type, where t = {'s', 'i', 'f', 'bf', 'p'}
diff --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err15.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err15.mir
index d1613869bf671..228b367b06d40 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err15.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err15.mir
@@ -7,4 +7,4 @@ body: |
     %0:_(notatype) = IMPLICIT_DEF
 ...
 
-# CHECK: expected sN, pA, <M x sN>, <M x pA>, <vscale x M x sN>, or <vscale x M x pA> for GlobalISel type
+# CHECK: expected tN, pA, <M x tN>, <M x pA>, <vscale x M x tN>, or <vscale x M x pA> for GlobalISel type, where t = {'s', 'i', 'f', 'bf'}
diff --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err2.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err2.mir
index c504a7d6be249..b187bff218a75 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err2.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err2.mir
@@ -6,4 +6,4 @@ body: |
     %0:_(<vscale x) = IMPLICIT_DEF
 ...
 
-# CHECK: expected <vscale x M x sN> or <vscale M x pA> for vector type
+# CHECK: expected <vscale x M x tN> for vector type, where t = {'s', 'i', 'f', 'bf', 'p'}
diff --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err3.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err3.mir
index c504a7d6be249..b187bff218a75 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err3.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err3.mir
@@ -6,4 +6,4 @@ body: |
     %0:_(<vscale x) = IMPLICIT_DEF
 ...
 
-# CHECK: expected <vscale x M x sN> or <vscale M x pA> for vector type
+# CHECK: expected <vscale x M x tN> for vector type, where t = {'s', 'i', 'f', 'bf', 'p'}
diff --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err4.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err4.mir
index 654f534f4d301..1969f28b18d28 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err4.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err4.mir
@@ -6,5 +6,5 @@ body: |
     %0:_(<vscale x notanint) = IMPLICIT_DEF
 ...
 
-# CHECK: expected <vscale x M x sN> or <vscale M x pA> for vector type
+# CHECK: expected <vscale x M x tN> for vector type, where t = {'s', 'i', 'f', 'bf', 'p'}
 
diff --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err5.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err5.mir
index 26be2868c522e..49823f7432ba9 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err5.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err5.mir
@@ -6,4 +6,4 @@ body: |
     %0:_(<vscale x 4) = IMPLICIT_DEF
 ...
 
-# CHECK: expected <vscale x M x sN> or <vscale M x pA> for vector type
+# CHECK: expected <vscale x M x tN> for vector type, where t = {'s', 'i', 'f', 'bf', 'p'}
diff --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err6.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err6.mir
index 07a30f57139dc..7f8e421e76ae3 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err6.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err6.mir
@@ -7,5 +7,5 @@ body: |
     %0:_(<vscale x 4 x) = IMPLICIT_DEF
 ...
 
-# CHECK: expected <vscale x M x sN> or <vscale M x pA> for vector type
+# CHECK: expected <vscale x M x tN> for vector type, where t = {'s', 'i', 'f', 'bf', 'p'}
 
diff --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err7.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err7.mir
index dba902efe6331..ae3c472e2557c 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err7.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err7.mir
@@ -6,4 +6,4 @@ body: |
     %0:_(<vscale x 4 x notansorp) = IMPLICIT_DEF
 ...
 
-# CHECK: expected <vscale x M x sN> or <vscale M x pA> for vector type
+# CHECK: expected <vscale x M x tN> for vector type, where t = {'s', 'i', 'f', 'bf', 'p'}
diff --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err8.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err8.mir
index 8bedeabaa7906..9044f77f84dc5 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err8.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err8.mir
@@ -7,4 +7,4 @@ body: |
     %0:_(<vscale x 4 x s) = IMPLICIT_DEF
 ...
 
-# CHECK: expected integers after 's'/'p' type character
+# CHECK: expected integers after 's'/'i'/'f'/'bf'/'p' type identifier
diff --git a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err9.mir b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err9.mir
index fd0b9a4a054ab..9220715320d99 100644
--- a/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err9.mir
+++ b/llvm/test/CodeGen/MIR/Generic/scalable-vector-type-err9.mir
@@ -6,6 +6,6 @@ body: |
     %0:_(<vscale x 4 x pX) = IMPLICIT_DEF
 ...
 
-# CHECK: expected integers after 's'/'p' type character
+# CHECK: expected integers after 's'/'i'/'f'/'bf'/'p' type identifier
 
 
diff --git a/llvm/test/CodeGen/MIR/WebAssembly/typed-immediate-operand-invalid0.mir b/llvm/test/CodeGen/MIR/WebAssembly/typed-immediate-operand-invalid0.mir
index 72908711e9ee5..7a2e381372549 100644
--- a/llvm/test/CodeGen/MIR/WebAssembly/typed-immediate-operand-invalid0.mir
+++ b/llvm/test/CodeGen/MIR/WebAssembly/typed-immediate-operand-invalid0.mir
@@ -1,5 +1,5 @@
 # RUN: not llc -mtriple=wasm32-unknown-unknown -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
-# When a typed immediate operand is only a single 'i'/'s'/'p' character
+# When a typed immediate operand is only a single type character
 ---
 name: test_typed_immediate_operand_invalid0
 liveins:
@@ -7,7 +7,7 @@ liveins:
 body: |
   bb.0:
     liveins: $arguments
-    ; CHECK: [[@LINE+1]]:24: expected integers after 'i'/'s'/'p' type character
+    ; CHECK: [[@LINE+1]]:24: expected integers after 'i'/'s'/'f'/'bf'/'p' type identifier
     %0:i32 = CONST_I32 i 0, implicit-def dead $arguments
     RETURN implicit-def dead $arguments
 ...
diff --git a/llvm/test/CodeGen/MIR/WebAssembly/typed-immediate-operand-invalid1.mir b/llvm/test/CodeGen/MIR/WebAssembly/typed-immediate-operand-invalid1.mir
index f5c16b52553bf..7f59a9e5eca0f 100644
--- a/llvm/test/CodeGen/MIR/WebAssembly/typed-immediate-operand-invalid1.mir
+++ b/llvm/test/CodeGen/MIR/WebAssembly/typed-immediate-operand-invalid1.mir
@@ -1,5 +1,5 @@
 # RUN: not llc -mtriple=wasm32-unknown-unknown -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
-# When a typed immediate operand does not start with 'i', 's', or 'p'
+# When a typed immediate operand does not start with a valid type identifier
 ---
 name: test_typed_immediate_operand_invalid1
 liveins:
@@ -7,7 +7,7 @@ liveins:
 body: |
   bb.0:
     liveins: $arguments
-    ; CHECK: [[@LINE+1]]:24: a typed immediate operand should start with one of 'i', 's', or 'p'
+    ; CHECK: [[@LINE+1]]:24: a typed immediate operand should start with one of 'i', 's', 'f', 'bf', or 'p'
     %0:i32 = CONST_I32 abc 0, implicit-def dead $arguments
     RETURN implicit-def dead $arguments
 ...

>From 655b85ada6d0c441ad6e36d4474ad31f5326dc5a Mon Sep 17 00:00:00 2001
From: Denis Gerasimov <dengzmm at gmail.com>
Date: Fri, 22 Aug 2025 19:32:47 +0300
Subject: [PATCH 6/9] Enable bfloat in IRTranslator for targets that support it

---
 .../llvm/CodeGen/GlobalISel/IRTranslator.h    |  2 +
 llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp  | 37 +++++++++++--------
 2 files changed, 23 insertions(+), 16 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
index 3d7ccd55ee042..5673fd5168477 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -642,6 +642,8 @@ class IRTranslator : public MachineFunctionPass {
 
   StackProtectorDescriptor SPDescriptor;
 
+  bool mayTranslateUserTypes(const User &U) const;
+
   /// Switch analysis and optimization.
   class GISelSwitchLowering : public SwitchCG::SwitchLowering {
   public:
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 4484a0769ad2a..0abaeea482f24 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -295,19 +295,10 @@ void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
   MachinePreds[Edge].push_back(NewPred);
 }
 
-static bool containsBF16Type(const User &U) {
-  // BF16 cannot currently be represented by LLT, to avoid miscompiles we
-  // prevent any instructions using them. FIXME: This can be removed once LLT
-  // supports bfloat.
-  return U.getType()->getScalarType()->isBFloatTy() ||
-         any_of(U.operands(), [](Value *V) {
-           return V->getType()->getScalarType()->isBFloatTy();
-         });
-}
 
 bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                      MachineIRBuilder &MIRBuilder) {
-  if (containsBF16Type(U))
+  if (!mayTranslateUserTypes(U))
     return false;
 
   // Get or create a virtual register for each value.
@@ -329,7 +320,7 @@ bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
 
 bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
-  if (containsBF16Type(U))
+  if (!mayTranslateUserTypes(U))
     return false;
 
   Register Op0 = getOrCreateVReg(*U.getOperand(0));
@@ -349,7 +340,7 @@ bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
 
 bool IRTranslator::translateCompare(const User &U,
                                     MachineIRBuilder &MIRBuilder) {
-  if (containsBF16Type(U))
+  if (!mayTranslateUserTypes(U))
     return false;
 
   auto *CI = cast<CmpInst>(&U);
@@ -1570,7 +1561,7 @@ bool IRTranslator::translateBitCast(const User &U,
 
 bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                  MachineIRBuilder &MIRBuilder) {
-  if (containsBF16Type(U))
+  if (!mayTranslateUserTypes(U))
     return false;
 
   uint32_t Flags = 0;
@@ -2675,7 +2666,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
 
 bool IRTranslator::translateInlineAsm(const CallBase &CB,
                                       MachineIRBuilder &MIRBuilder) {
-  if (containsBF16Type(CB))
+  if (!mayTranslateUserTypes(CB))
     return false;
 
   const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();
@@ -2766,7 +2757,7 @@ bool IRTranslator::translateCallBase(const CallBase &CB,
 }
 
 bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
-  if (containsBF16Type(U))
+  if (!mayTranslateUserTypes(U))
     return false;
 
   const CallInst &CI = cast<CallInst>(U);
@@ -3392,7 +3383,7 @@ bool IRTranslator::translateAtomicCmpXchg(const User &U,
 
 bool IRTranslator::translateAtomicRMW(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
-  if (containsBF16Type(U))
+  if (!mayTranslateUserTypes(U))
     return false;
 
   const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
@@ -3733,6 +3724,20 @@ bool IRTranslator::translate(const Constant &C, Register Reg) {
   return true;
 }
 
+bool IRTranslator::mayTranslateUserTypes(const User &U) const {
+  if (TLI->getTargetMachine().Options.EnableGlobalISelExtendedLLT) {
+    return true;
+  }
+
+  // BF16 cannot currently be represented by the default LLT; to avoid
+  // miscompiles we reject any instruction that uses it when
+  // TargetOptions::EnableGlobalISelExtendedLLT is disabled.
+  return !U.getType()->getScalarType()->isBFloatTy() &&
+         !any_of(U.operands(), [](Value *V) {
+           return V->getType()->getScalarType()->isBFloatTy();
+         });
+}
+
 bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
                                       MachineBasicBlock &MBB) {
   for (auto &BTB : SL->BitTestCases) {

>From b767e6ee5420427e5fa7769cdd30c40e66dce340 Mon Sep 17 00:00:00 2001
From: Denis Gerasimov <dengzmm at gmail.com>
Date: Sat, 23 Aug 2025 18:20:22 +0300
Subject: [PATCH 7/9] [unittest] Add simple IRTranslator test for bf16

---
 .../CodeGen/GlobalISel/CMakeLists.txt         |   1 +
 .../GlobalISel/IRTranslatorBF16Test.cpp       | 130 ++++++++++++++++++
 2 files changed, 131 insertions(+)
 create mode 100644 llvm/unittests/CodeGen/GlobalISel/IRTranslatorBF16Test.cpp

diff --git a/llvm/unittests/CodeGen/GlobalISel/CMakeLists.txt b/llvm/unittests/CodeGen/GlobalISel/CMakeLists.txt
index 4ef6aff943f73..e85a3aa72c961 100644
--- a/llvm/unittests/CodeGen/GlobalISel/CMakeLists.txt
+++ b/llvm/unittests/CodeGen/GlobalISel/CMakeLists.txt
@@ -14,6 +14,7 @@ set(LLVM_LINK_COMPONENTS
   )
 
 add_llvm_unittest(GlobalISelTests
+  IRTranslatorBF16Test.cpp
   ConstantFoldingTest.cpp
   CSETest.cpp
   GIMatchTableExecutorTest.cpp
diff --git a/llvm/unittests/CodeGen/GlobalISel/IRTranslatorBF16Test.cpp b/llvm/unittests/CodeGen/GlobalISel/IRTranslatorBF16Test.cpp
new file mode 100644
index 0000000000000..31ec0e34a0661
--- /dev/null
+++ b/llvm/unittests/CodeGen/GlobalISel/IRTranslatorBF16Test.cpp
@@ -0,0 +1,130 @@
+//===- IRTranslatorBF16Test.cpp - IRTranslator unit tests -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetFrameLowering.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/Module.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/TargetSelect.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "gtest/gtest.h"
+#include <memory>
+
+using namespace llvm;
+
+namespace {
+struct AArch64IRTranslatorTest : public ::testing::Test {
+  LLVMContext C;
+
+public:
+  AArch64IRTranslatorTest() {}
+  std::unique_ptr<TargetMachine> createTargetMachine() const {
+    Triple TargetTriple("aarch64--");
+    std::string Error;
+    const Target *T = TargetRegistry::lookupTarget("", TargetTriple, Error);
+    if (!T)
+      return nullptr;
+
+    TargetOptions Options;
+    return std::unique_ptr<TargetMachine>(
+        T->createTargetMachine(TargetTriple, "", "", Options, std::nullopt,
+                              std::nullopt, CodeGenOptLevel::Aggressive));
+  }
+
+  std::unique_ptr<Module> parseIR(const char *IR) {
+    SMDiagnostic Err;
+    std::unique_ptr<Module> Mod = parseAssemblyString(IR, Err, C);
+    if (!Mod)
+      Err.print("Test TargetIRTranslator", errs());
+    return Mod;
+  }
+};
+} // namespace
+
+TEST_F(AArch64IRTranslatorTest, IRTranslateBfloat16) {
+  InitializeAllTargets();
+  InitializeAllTargetMCs();
+  InitializeAllAsmPrinters();
+  InitializeAllAsmParsers();
+
+  PassRegistry *Registry = PassRegistry::getPassRegistry();
+  initializeCore(*Registry);
+  initializeCodeGen(*Registry);
+  initializeGlobalISel(*Registry);
+
+  std::unique_ptr<Module> M = parseIR(R"(
+  define void @foo(ptr %p0) {
+    %ptr1 = getelementptr bfloat, ptr %p0, i64 0
+    %ptr2 = getelementptr bfloat, ptr %p0, i64 1
+    %ptr3 = getelementptr bfloat, ptr %p0, i64 2
+    %a = load bfloat, ptr %ptr1, align 2
+    %b = load bfloat, ptr %ptr2, align 2
+    %c = load bfloat, ptr %ptr3, align 2
+    %mul = fmul bfloat %a, %b
+    %res = fadd bfloat %mul, %c
+    %ptr4 = getelementptr bfloat, ptr %p0, i64 3
+    store bfloat %res, ptr %ptr4, align 2
+    ret void
+  }
+  )");
+
+  auto TM = createTargetMachine();
+  M->setDataLayout(TM->createDataLayout());
+
+  TM->setGlobalISel(true);
+  TM->setGlobalISelExtendedLLT(true);
+  TM->setGlobalISelAbort(GlobalISelAbortMode::DisableWithDiag);
+
+  legacy::PassManager PM;
+  TargetPassConfig *TPC(TM->createPassConfig(PM));
+
+  MachineModuleInfoWrapperPass *MMIWP = new MachineModuleInfoWrapperPass(TM.get());
+  PM.add(TPC);
+  PM.add(MMIWP);
+  PM.add(new IRTranslator());
+  PM.run(*M);
+
+  auto *MMI = &MMIWP->getMMI();
+  Function *F = M->getFunction("foo");
+  auto *MF = MMI->getMachineFunction(*F);
+  MachineRegisterInfo &MRI = MF->getRegInfo();
+  ASSERT_FALSE(MF->getProperties().hasProperty(
+      llvm::MachineFunctionProperties::Property::FailedISel));
+  for (auto &MI : MF->front()) {
+    if (MI.getOpcode() == TargetOpcode::G_LOAD) {
+      ASSERT_EQ(MRI.getType(MI.getOperand(0).getReg()), LLT::bfloat16());
+    }
+
+    if (MI.getOpcode() == TargetOpcode::G_FADD ||
+        MI.getOpcode() == TargetOpcode::G_FMUL) {
+      for (auto &Op : MI.operands()) {
+        ASSERT_EQ(MRI.getType(Op.getReg()), LLT::bfloat16());
+      }
+    }
+  }
+  MMI->deleteMachineFunctionFor(*F);
+
+  // Run again without extended LLT
+  TM->setGlobalISelExtendedLLT(false);
+  PM.run(*M);
+  MF = MMI->getMachineFunction(*F);
+  ASSERT_TRUE(MF->getProperties().hasProperty(llvm::MachineFunctionProperties::Property::FailedISel));
+}

>From 82602b7ecdae957ef2349e36d16aecda91cfb2f0 Mon Sep 17 00:00:00 2001
From: Denis Gerasimov <dengzmm at gmail.com>
Date: Sat, 23 Aug 2025 18:45:53 +0300
Subject: [PATCH 8/9] fixed names

---
 llvm/include/llvm/CodeGenTypes/LowLevelType.h | 14 +++++++-------
 llvm/lib/CodeGenTypes/LowLevelType.cpp        |  4 ++--
 llvm/lib/Support/TypeSize.cpp                 |  2 +-
 3 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/llvm/include/llvm/CodeGenTypes/LowLevelType.h b/llvm/include/llvm/CodeGenTypes/LowLevelType.h
index daec87ab536c0..0c217ba1fcf7e 100644
--- a/llvm/include/llvm/CodeGenTypes/LowLevelType.h
+++ b/llvm/include/llvm/CodeGenTypes/LowLevelType.h
@@ -42,9 +42,9 @@ class LLT {
   enum class FPVariant {
     IEEE_FLOAT = 0x0,
     BRAIN_FLOAT = 0x1,     // BRAIN_FLOAT
-    VARIANT_FLOAT_2 = 0x2, // PPC_FLOAT
-    VARIANT_FLOAT_3 = 0x3, // FP80
-    VARIANT_FLOAT_4 = 0x4, // TENSOR_FLOAT
+    PPC128_FLOAT = 0x2,    // PPC double-double
+    EXTENDED_FP80 = 0x3,   // x87 80-bit extended precision
+    TENSOR_FLOAT32 = 0x4,  // NVIDIA TensorFloat-32
     VARIANT_FLOAT_5 = 0x5, // UNASSIGNED
     VARIANT_FLOAT_6 = 0x6, // UNASSIGNED
     VARIANT_FLOAT_7 = 0x7, // UNASSIGNED
@@ -163,7 +163,7 @@ class LLT {
 
   /// Get a 80-bit X86 floating point value.
   static constexpr LLT x86fp80() {
-    return floatingPoint(80, FPVariant::VARIANT_FLOAT_3);
+    return floatingPoint(80, FPVariant::EXTENDED_FP80);
   }
 
   /// Get a 128-bit IEEE quad value.
@@ -173,7 +173,7 @@ class LLT {
 
   /// Get a 128-bit PowerPC double double value.
   static constexpr LLT ppcf128() {
-    return floatingPoint(128, FPVariant::VARIANT_FLOAT_2);
+    return floatingPoint(128, FPVariant::PPC128_FLOAT);
   }
 
   /// Get a low-level fixed-width vector of some number of elements and element
@@ -249,10 +249,10 @@ class LLT {
     return isVariantFloat(Size, FPVariant::BRAIN_FLOAT);
   }
   constexpr bool isX86FP80() const {
-    return isVariantFloat(80, FPVariant::VARIANT_FLOAT_3);
+    return isVariantFloat(80, FPVariant::EXTENDED_FP80);
   }
   constexpr bool isPPCF128() const {
-    return isVariantFloat(128, FPVariant::VARIANT_FLOAT_2);
+    return isVariantFloat(128, FPVariant::PPC128_FLOAT);
   }
   constexpr bool isToken() const {
     return Info == Kind::ANY_SCALAR && RawData == 0;
diff --git a/llvm/lib/CodeGenTypes/LowLevelType.cpp b/llvm/lib/CodeGenTypes/LowLevelType.cpp
index 35329793bd508..6025f3fedafa1 100644
--- a/llvm/lib/CodeGenTypes/LowLevelType.cpp
+++ b/llvm/lib/CodeGenTypes/LowLevelType.cpp
@@ -24,9 +24,9 @@ static std::optional<LLT::FPVariant> deriveFPInfo(MVT VT) {
   case MVT::bf16:
     return LLT::FPVariant::BRAIN_FLOAT;
   case MVT::f80:
-    return LLT::FPVariant::VARIANT_FLOAT_3;
+    return LLT::FPVariant::EXTENDED_FP80;
   case MVT::ppcf128:
-    return LLT::FPVariant::VARIANT_FLOAT_2;
+    return LLT::FPVariant::PPC128_FLOAT;
   default:
     return LLT::FPVariant::IEEE_FLOAT;
   }
diff --git a/llvm/lib/Support/TypeSize.cpp b/llvm/lib/Support/TypeSize.cpp
index a96fced8eff30..43346b81cd676 100644
--- a/llvm/lib/Support/TypeSize.cpp
+++ b/llvm/lib/Support/TypeSize.cpp
@@ -44,7 +44,7 @@ void llvm::reportInvalidSizeRequest(const char *Msg) {
     return;
   }
 #endif
-  report_fatal_error(Twine("Invalid size request on a scalable vector.") + Msg);
+  report_fatal_error(Twine("Invalid size request on a scalable vector.") + Msg);
 }
 
 TypeSize::operator TypeSize::ScalarTy() const {

>From 6485b50cfdea9a41c11b294b70c30b35d22ee845 Mon Sep 17 00:00:00 2001
From: Denis Gerasimov <dengzmm at gmail.com>
Date: Sat, 23 Aug 2025 19:21:30 +0300
Subject: [PATCH 9/9] code style fixes

---
 llvm/include/llvm/CodeGen/Analysis.h             |  3 +--
 .../llvm/CodeGen/GlobalISel/LegalizerInfo.h      |  3 ++-
 llvm/include/llvm/CodeGen/LowLevelTypeUtils.h    |  3 ++-
 llvm/include/llvm/CodeGenTypes/LowLevelType.h    | 13 +++++++++----
 llvm/include/llvm/Target/TargetMachine.h         |  4 +++-
 llvm/include/llvm/Target/TargetOptions.h         |  4 ++--
 llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp     |  1 -
 llvm/lib/CodeGen/MIRParser/MIParser.cpp          | 16 ++++++++++------
 .../AArch64/GISel/AArch64RegisterBankInfo.cpp    |  3 ++-
 llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp   |  5 +++--
 .../CodeGen/GlobalISel/IRTranslatorBF16Test.cpp  | 10 ++++++----
 11 files changed, 40 insertions(+), 25 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/Analysis.h b/llvm/include/llvm/CodeGen/Analysis.h
index 2aadc27930b7a..5c36ecd26719e 100644
--- a/llvm/include/llvm/CodeGen/Analysis.h
+++ b/llvm/include/llvm/CodeGen/Analysis.h
@@ -101,8 +101,7 @@ inline void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
 /// If Offsets is non-null, it points to a vector to be filled in
 /// with the in-memory offsets of each of the individual values.
 ///
-void computeValueLLTs(const TargetLowering &TLI,
-                      const DataLayout &DL, Type &Ty,
+void computeValueLLTs(const TargetLowering &TLI, const DataLayout &DL, Type &Ty,
                       SmallVectorImpl<LLT> &ValueTys,
                       SmallVectorImpl<uint64_t> *Offsets = nullptr,
                       uint64_t StartingOffset = 0);
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
index 8412042a780e8..7b6b5dbf0be05 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
@@ -380,7 +380,8 @@ LLVM_ABI LegalizeMutation changeElementCountTo(unsigned TypeIdx,
 
 /// Keep the same scalar or element type as \p TypeIdx, but take the number of
 /// elements from \p Ty.
-LLVM_ABI LegalizeMutation changeElementCountTo(unsigned TypeIdx, ElementCount EC);
+LLVM_ABI LegalizeMutation changeElementCountTo(unsigned TypeIdx,
+                                               ElementCount EC);
 
 /// Change the scalar size or element size to have the same scalar size as type
 /// index \p FromIndex. Unlike changeElementTo, this discards pointer types and
diff --git a/llvm/include/llvm/CodeGen/LowLevelTypeUtils.h b/llvm/include/llvm/CodeGen/LowLevelTypeUtils.h
index 1c8f34b1a8518..4092509948ae6 100644
--- a/llvm/include/llvm/CodeGen/LowLevelTypeUtils.h
+++ b/llvm/include/llvm/CodeGen/LowLevelTypeUtils.h
@@ -27,7 +27,8 @@ class Type;
 struct fltSemantics;
 
 /// Construct a low-level type based on an LLVM type.
-LLVM_ABI LLT getLLTForType(Type &Ty, const DataLayout &DL, bool AllowExtendedLLT = false);
+LLVM_ABI LLT getLLTForType(Type &Ty, const DataLayout &DL,
+                           bool AllowExtendedLLT = false);
 
 /// Get a rough equivalent of an MVT for a given LLT. MVT can't distinguish
 /// pointers, so these will convert to a plain integer.
diff --git a/llvm/include/llvm/CodeGenTypes/LowLevelType.h b/llvm/include/llvm/CodeGenTypes/LowLevelType.h
index 0c217ba1fcf7e..3378baf680230 100644
--- a/llvm/include/llvm/CodeGenTypes/LowLevelType.h
+++ b/llvm/include/llvm/CodeGenTypes/LowLevelType.h
@@ -178,7 +178,8 @@ class LLT {
 
   /// Get a low-level fixed-width vector of some number of elements and element
   /// width.
-  static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits) {
+  static constexpr LLT fixed_vector(unsigned NumElements,
+                                    unsigned ScalarSizeInBits) {
     return vector(ElementCount::getFixed(NumElements),
                   LLT::scalar(ScalarSizeInBits));
   }
@@ -191,7 +192,8 @@ class LLT {
 
   /// Get a low-level scalable vector of some number of elements and element
   /// width.
-  static constexpr LLT scalable_vector(unsigned MinNumElements, unsigned ScalarSizeInBits) {
+  static constexpr LLT scalable_vector(unsigned MinNumElements,
+                                       unsigned ScalarSizeInBits) {
     return vector(ElementCount::getScalable(MinNumElements),
                   LLT::scalar(ScalarSizeInBits));
   }
@@ -222,7 +224,10 @@ class LLT {
   explicit constexpr LLT() : Info(static_cast<Kind>(0)), RawData(0) {}
 
   constexpr bool isValid() const { return isToken() || RawData != 0; }
-  constexpr bool isScalar() const { return Info == Kind::ANY_SCALAR || Info == Kind::INTEGER || Info == Kind::FLOAT; }
+  constexpr bool isScalar() const {
+    return Info == Kind::ANY_SCALAR || Info == Kind::INTEGER ||
+           Info == Kind::FLOAT;
+  }
   constexpr bool isScalar(unsigned Size) const {
     return isScalar() && getScalarSizeInBits() == Size;
   }
@@ -274,7 +279,7 @@ class LLT {
   }
   constexpr bool isVector() const {
     return isValid() &&
-           (Info == Kind::VECTOR_ANY || Info == Kind::VECTOR_INTEGER||
+           (Info == Kind::VECTOR_ANY || Info == Kind::VECTOR_INTEGER ||
             Info == Kind::VECTOR_FLOAT || Info == Kind::VECTOR_POINTER);
   }
   constexpr bool isPointer() const {
diff --git a/llvm/include/llvm/Target/TargetMachine.h b/llvm/include/llvm/Target/TargetMachine.h
index 5671318a8586d..a503199856c5a 100644
--- a/llvm/include/llvm/Target/TargetMachine.h
+++ b/llvm/include/llvm/Target/TargetMachine.h
@@ -295,7 +295,9 @@ class LLVM_ABI TargetMachine {
   bool getO0WantsFastISel() { return O0WantsFastISel; }
   void setO0WantsFastISel(bool Enable) { O0WantsFastISel = Enable; }
   void setGlobalISel(bool Enable) { Options.EnableGlobalISel = Enable; }
-  void setGlobalISelExtendedLLT(bool Enable) { Options.EnableGlobalISelExtendedLLT = Enable; }
+  void setGlobalISelExtendedLLT(bool Enable) {
+    Options.EnableGlobalISelExtendedLLT = Enable;
+  }
   void setGlobalISelAbort(GlobalISelAbortMode Mode) {
     Options.GlobalISelAbort = Mode;
   }
diff --git a/llvm/include/llvm/Target/TargetOptions.h b/llvm/include/llvm/Target/TargetOptions.h
index 839412cd151bf..b167d4e13a342 100644
--- a/llvm/include/llvm/Target/TargetOptions.h
+++ b/llvm/include/llvm/Target/TargetOptions.h
@@ -236,8 +236,8 @@ class TargetOptions {
   /// EnableGlobalISel - This flag enables global instruction selection.
   unsigned EnableGlobalISel : 1;
 
-  /// EnableGlobalISelExtendedLLT - This flag enables LLTs with extenden type info
-  /// so target may distinguish different formats of equal sized scalars.
+  /// EnableGlobalISelExtendedLLT - This flag enables LLTs with extended type
+  /// info so targets may distinguish different formats of equal-sized scalars.
   unsigned EnableGlobalISelExtendedLLT : 1;
 
   /// EnableGlobalISelAbort - Control abort behaviour when global instruction
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 670e03e69a9c4..8dac12fc4597a 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -294,7 +294,6 @@ void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
   MachinePreds[Edge].push_back(NewPred);
 }
 
-
 bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                      MachineIRBuilder &MIRBuilder) {
   if (!mayTranslateUserTypes(U))
diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
index 699f4d3529b6e..94205698c5038 100644
--- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -1934,7 +1934,8 @@ bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty) {
       TypeDigits.consume_front("f") || TypeDigits.consume_front("p") ||
       TypeDigits.consume_front("bf")) {
     if (TypeDigits.empty() || !llvm::all_of(TypeDigits, isdigit))
-      return error("expected integers after 's'/'i'/'f'/'bf'/'p' type identifier");
+      return error(
+          "expected integers after 's'/'i'/'f'/'bf'/'p' type identifier");
   }
 
   if (Token.range().starts_with("s") || Token.range().starts_with("i")) {
@@ -1994,15 +1995,17 @@ bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty) {
   if (HasVScale) {
     lex();
     if (Token.isNot(MIToken::Identifier) || Token.stringValue() != "x")
-      return error("expected <vscale x M x tN>, where t = {'s', 'i', 'f', 'bf', 'p'}");
+      return error(
+          "expected <vscale x M x tN>, where t = {'s', 'i', 'f', 'bf', 'p'}");
     lex();
   }
 
   auto GetError = [this, &HasVScale, Loc]() {
     if (HasVScale)
-      return error(
-          Loc, "expected <vscale x M x tN> for vector type, where t = {'s', 'i', 'f', 'bf', 'p'}");
-    return error(Loc, "expected <M x tN> for vector type, where t = {'s', 'i', 'f', 'bf', 'p'}");
+      return error(Loc, "expected <vscale x M x tN> for vector type, where t = "
+                        "{'s', 'i', 'f', 'bf', 'p'}");
+    return error(Loc, "expected <M x tN> for vector type, where t = {'s', 'i', "
+                      "'f', 'bf', 'p'}");
   };
 
   if (Token.isNot(MIToken::IntegerLiteral))
@@ -2073,7 +2076,8 @@ bool MIParser::parseTypedImmediateOperand(MachineOperand &Dest) {
     return error("a typed immediate operand should start with one of 'i', "
                  "'s', 'f', 'bf', or 'p'");
   if (TypeDigits.empty() || !llvm::all_of(TypeDigits, isdigit))
-    return error("expected integers after 'i'/'s'/'f'/'bf'/'p' type identifier");
+    return error(
+        "expected integers after 'i'/'s'/'f'/'bf'/'p' type identifier");
 
   auto Loc = Token.location();
   lex();
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
index aa598dec1c800..29d5cec49d579 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
@@ -570,7 +570,8 @@ bool AArch64RegisterBankInfo::onlyUsesFP(const MachineInstr &MI,
     case Intrinsic::aarch64_neon_fcvtpu:
       // Force FPR register bank for half types, as those types otherwise
       // don't get legalized correctly resulting in fp16 <-> gpr32 COPY's.
-      return MRI.getType(MI.getOperand(2).getReg()) == LLT::scalar(16); // TODO: Expected LLT::float16()
+      return MRI.getType(MI.getOperand(2).getReg()) ==
+             LLT::scalar(16); // TODO: Expected LLT::float16()
     default:
       break;
     }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index ccc01ddfce777..a4e2f321be91d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -317,8 +317,9 @@ constexpr LLT V10S16 = LLT::fixed_vector(10, 16);
 constexpr LLT V12S16 = LLT::fixed_vector(12, 16);
 constexpr LLT V16S16 = LLT::fixed_vector(16, 16);
 
-constexpr LLT V2F16 = LLT::fixed_vector(2, LLT::scalar(16)); // TODO: Expected float16
-constexpr LLT V2BF16 = V2F16; // FIXME
+constexpr LLT V2F16 =
+    LLT::fixed_vector(2, LLT::scalar(16)); // TODO: Expected float16
+constexpr LLT V2BF16 = V2F16;              // FIXME
 
 constexpr LLT V2S32 = LLT::fixed_vector(2, 32);
 constexpr LLT V3S32 = LLT::fixed_vector(3, 32);
diff --git a/llvm/unittests/CodeGen/GlobalISel/IRTranslatorBF16Test.cpp b/llvm/unittests/CodeGen/GlobalISel/IRTranslatorBF16Test.cpp
index 31ec0e34a0661..c532634c19817 100644
--- a/llvm/unittests/CodeGen/GlobalISel/IRTranslatorBF16Test.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/IRTranslatorBF16Test.cpp
@@ -6,9 +6,9 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/AsmParser/Parser.h"
+#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -46,7 +46,7 @@ struct AArch64IRTranslatorTest : public ::testing::Test {
     TargetOptions Options;
     return std::unique_ptr<TargetMachine>(
         T->createTargetMachine(TargetTriple, "", "", Options, std::nullopt,
-                              std::nullopt, CodeGenOptLevel::Aggressive));
+                               std::nullopt, CodeGenOptLevel::Aggressive));
   }
 
   std::unique_ptr<Module> parseIR(const char *IR) {
@@ -96,7 +96,8 @@ TEST_F(AArch64IRTranslatorTest, IRTranslateBfloat16) {
   legacy::PassManager PM;
   TargetPassConfig *TPC(TM->createPassConfig(PM));
 
-  MachineModuleInfoWrapperPass *MMIWP = new MachineModuleInfoWrapperPass(TM.get());
+  MachineModuleInfoWrapperPass *MMIWP =
+      new MachineModuleInfoWrapperPass(TM.get());
   PM.add(TPC);
   PM.add(MMIWP);
   PM.add(new IRTranslator());
@@ -126,5 +127,6 @@ TEST_F(AArch64IRTranslatorTest, IRTranslateBfloat16) {
   TM->setGlobalISelExtendedLLT(false);
   PM.run(*M);
   MF = MMI->getMachineFunction(*F);
-  ASSERT_TRUE(MF->getProperties().hasProperty(llvm::MachineFunctionProperties::Property::FailedISel));
+  ASSERT_TRUE(MF->getProperties().hasProperty(
+      llvm::MachineFunctionProperties::Property::FailedISel));
 }



More information about the llvm-commits mailing list