[llvm] [GlobalISel][NFC] Remove LLT initializers taking scalar sizes (PR #119725)

Tim Gymnich via llvm-commits llvm-commits at lists.llvm.org
Tue Dec 17 02:14:33 PST 2024


https://github.com/tgymnich updated https://github.com/llvm/llvm-project/pull/119725

From feaea47c4c185ce51c406719ef9e5d4ece60e1f5 Mon Sep 17 00:00:00 2001
From: Tim Gymnich <tim at brium.ai>
Date: Thu, 12 Dec 2024 17:14:46 +0000
Subject: [PATCH 1/4] remove LLT initializers taking a scalar size in favor of
 initializers taking an LLT

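Replace the LLT initializers that take a raw scalar size in bits (the
removed vector/fixed_vector/scalable_vector/scalarOrVector overloads and
changeElementSize) with the overloads that take an element LLT. A minimal
before/after sketch of the call-site migration, using types that appear in
the diff below:

  // before: element width passed as a raw bit count
  LLT V2S64 = LLT::fixed_vector(2, 64);
  LLT Wide  = Ty.changeElementSize(32);

  // after: element type passed as an LLT
  LLT V2S64 = LLT::fixed_vector(2, LLT::scalar(64));
  LLT Wide  = Ty.changeElementType(LLT::scalar(32));
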
---
 llvm/include/llvm/CodeGen/MachineFunction.h   |  12 +-
 llvm/include/llvm/CodeGenTypes/LowLevelType.h |  37 -----
 .../lib/CodeGen/GlobalISel/CombinerHelper.cpp |  13 +-
 .../GlobalISel/LegacyLegalizerInfo.cpp        |  16 +-
 .../CodeGen/GlobalISel/LegalizeMutations.cpp  |   6 +-
 .../CodeGen/GlobalISel/LegalizerHelper.cpp    |  68 +++++----
 llvm/lib/CodeGen/GlobalISel/Utils.cpp         |   4 +-
 llvm/lib/CodeGen/LowLevelTypeUtils.cpp        |   5 +-
 llvm/lib/CodeGen/MachineOperand.cpp           |  10 +-
 .../Target/AArch64/AArch64ISelLowering.cpp    |   4 +-
 .../GISel/AArch64InstructionSelector.cpp      |  51 +++----
 .../AArch64/GISel/AArch64LegalizerInfo.cpp    |  75 +++++-----
 .../GISel/AArch64PostLegalizerCombiner.cpp    |  13 +-
 .../GISel/AArch64PostLegalizerLowering.cpp    |   4 +-
 .../GISel/AArch64PreLegalizerCombiner.cpp     |  49 ++++---
 .../Target/AMDGPU/AMDGPUArgumentUsageInfo.cpp |   3 +-
 llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp |   4 +-
 .../AMDGPU/AMDGPUInstructionSelector.cpp      |   9 +-
 .../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 115 ++++++++-------
 .../AMDGPU/AMDGPUPreLegalizerCombiner.cpp     |   2 +-
 .../Target/AMDGPU/AMDGPURegisterBankInfo.cpp  |  12 +-
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |   4 +-
 llvm/lib/Target/Mips/MipsLegalizerInfo.cpp    |   8 +-
 .../Target/PowerPC/GISel/PPCLegalizerInfo.cpp |   8 +-
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp |  24 ++-
 .../lib/Target/X86/GISel/X86LegalizerInfo.cpp |  29 ++--
 llvm/unittests/CodeGen/GlobalISel/CSETest.cpp |  12 +-
 .../CodeGen/GlobalISel/GISelUtilsTest.cpp     | 112 ++++++++------
 .../GlobalISel/KnownBitsVectorTest.cpp        |   4 +-
 .../GlobalISel/LegalizerHelperTest.cpp        |  80 +++++-----
 .../CodeGen/GlobalISel/LegalizerInfoTest.cpp  |  87 +++++------
 .../CodeGen/GlobalISel/LegalizerTest.cpp      |   4 +-
 .../GlobalISel/MachineIRBuilderTest.cpp       |  32 ++--
 .../CodeGen/GlobalISel/PatternMatchTest.cpp   |   6 +-
 llvm/unittests/CodeGen/LowLevelTypeTest.cpp   | 137 ++++++++++--------
 .../GlobalISel/GlobalISelMatchTable.cpp       |  10 +-
 36 files changed, 550 insertions(+), 519 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/MachineFunction.h b/llvm/include/llvm/CodeGen/MachineFunction.h
index d696add8a1af53..7f1447920156df 100644
--- a/llvm/include/llvm/CodeGen/MachineFunction.h
+++ b/llvm/include/llvm/CodeGen/MachineFunction.h
@@ -1064,12 +1064,12 @@ class LLVM_ABI MachineFunction {
                                           int64_t Offset, LLT Ty);
   MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
                                           int64_t Offset, LocationSize Size) {
-    return getMachineMemOperand(
-        MMO, Offset,
-        !Size.hasValue() ? LLT()
-        : Size.isScalable()
-            ? LLT::scalable_vector(1, 8 * Size.getValue().getKnownMinValue())
-            : LLT::scalar(8 * Size.getValue().getKnownMinValue()));
+    if (!Size.hasValue())
+      return getMachineMemOperand(MMO, Offset, LLT());
+
+    ElementCount EC = ElementCount::get(1, Size.isScalable());
+    LLT Ty = LLT::scalar(8 * Size.getValue().getKnownMinValue());
+    return getMachineMemOperand(MMO, Offset, LLT::scalarOrVector(EC, Ty));
   }
   MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
                                           int64_t Offset, uint64_t Size) {
diff --git a/llvm/include/llvm/CodeGenTypes/LowLevelType.h b/llvm/include/llvm/CodeGenTypes/LowLevelType.h
index 62ee28cfac99c5..f139800025541d 100644
--- a/llvm/include/llvm/CodeGenTypes/LowLevelType.h
+++ b/llvm/include/llvm/CodeGenTypes/LowLevelType.h
@@ -60,13 +60,6 @@ class LLT {
                ElementCount::getFixed(0), SizeInBits, AddressSpace};
   }
 
-  /// Get a low-level vector of some number of elements and element width.
-  static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits) {
-    assert(!EC.isScalar() && "invalid number of vector elements");
-    return LLT{/*isPointer=*/false, /*isVector=*/true, /*isScalar=*/false,
-               EC, ScalarSizeInBits, /*AddressSpace=*/0};
-  }
-
   /// Get a low-level vector of some number of elements and element type.
   static constexpr LLT vector(ElementCount EC, LLT ScalarTy) {
     assert(!EC.isScalar() && "invalid number of vector elements");
@@ -95,26 +88,12 @@ class LLT {
     return scalar(64);
   }
 
-  /// Get a low-level fixed-width vector of some number of elements and element
-  /// width.
-  static constexpr LLT fixed_vector(unsigned NumElements,
-                                    unsigned ScalarSizeInBits) {
-    return vector(ElementCount::getFixed(NumElements), ScalarSizeInBits);
-  }
-
   /// Get a low-level fixed-width vector of some number of elements and element
   /// type.
   static constexpr LLT fixed_vector(unsigned NumElements, LLT ScalarTy) {
     return vector(ElementCount::getFixed(NumElements), ScalarTy);
   }
 
-  /// Get a low-level scalable vector of some number of elements and element
-  /// width.
-  static constexpr LLT scalable_vector(unsigned MinNumElements,
-                                       unsigned ScalarSizeInBits) {
-    return vector(ElementCount::getScalable(MinNumElements), ScalarSizeInBits);
-  }
-
   /// Get a low-level scalable vector of some number of elements and element
   /// type.
   static constexpr LLT scalable_vector(unsigned MinNumElements, LLT ScalarTy) {
@@ -125,12 +104,6 @@ class LLT {
     return EC.isScalar() ? ScalarTy : LLT::vector(EC, ScalarTy);
   }
 
-  static constexpr LLT scalarOrVector(ElementCount EC, uint64_t ScalarSize) {
-    assert(ScalarSize <= std::numeric_limits<unsigned>::max() &&
-           "Not enough bits in LLT to represent size");
-    return scalarOrVector(EC, LLT::scalar(static_cast<unsigned>(ScalarSize)));
-  }
-
   explicit constexpr LLT(bool isPointer, bool isVector, bool isScalar,
                          ElementCount EC, uint64_t SizeInBits,
                          unsigned AddressSpace)
@@ -215,16 +188,6 @@ class LLT {
     return isVector() ? LLT::vector(getElementCount(), NewEltTy) : NewEltTy;
   }
 
-  /// If this type is a vector, return a vector with the same number of elements
-  /// but the new element size. Otherwise, return the new element type. Invalid
-  /// for pointer types. For pointer types, use changeElementType.
-  constexpr LLT changeElementSize(unsigned NewEltSize) const {
-    assert(!isPointerOrPointerVector() &&
-           "invalid to directly change element size for pointers");
-    return isVector() ? LLT::vector(getElementCount(), NewEltSize)
-                      : LLT::scalar(NewEltSize);
-  }
-
   /// Return a vector or scalar with the same element type and the new element
   /// count.
   constexpr LLT changeElementCount(ElementCount EC) const {
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index e31980041e1920..63fead0e2369db 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -2580,7 +2580,7 @@ static LLT getMidVTForTruncRightShiftCombine(LLT ShiftTy, LLT TruncTy) {
 
   // ShiftTy > 32 > TruncTy -> 32
   if (ShiftSize > 32 && TruncSize < 32)
-    return ShiftTy.changeElementSize(32);
+    return ShiftTy.changeElementType(LLT::scalar(32));
 
   // TODO: We could also reduce to 16 bits, but that's more target-dependent.
   //  Some targets like it, some don't, some only like it under certain
@@ -5361,9 +5361,8 @@ MachineInstr *CombinerHelper::buildUDivUsingMul(MachineInstr &MI) {
 
   Q = MIB.buildLShr(Ty, Q, PostShift).getReg(0);
   auto One = MIB.buildConstant(Ty, 1);
-  auto IsOne = MIB.buildICmp(
-      CmpInst::Predicate::ICMP_EQ,
-      Ty.isScalar() ? LLT::scalar(1) : Ty.changeElementSize(1), RHS, One);
+  auto IsOne = MIB.buildICmp(CmpInst::Predicate::ICMP_EQ,
+                             Ty.changeElementType(LLT::scalar(1)), RHS, One);
   return MIB.buildSelect(Ty, IsOne, LHS, Q);
 }
 
@@ -5402,8 +5401,7 @@ bool CombinerHelper::matchUDivByConst(MachineInstr &MI) {
       return false;
     if (!isLegalOrBeforeLegalizer(
             {TargetOpcode::G_ICMP,
-             {DstTy.isVector() ? DstTy.changeElementSize(1) : LLT::scalar(1),
-              DstTy}}))
+             {DstTy.changeElementType(LLT::scalar(1)), DstTy}}))
       return false;
   }
 
@@ -5535,8 +5533,7 @@ void CombinerHelper::applySDivByPow2(MachineInstr &MI) {
   Register RHS = SDiv.getReg(2);
   LLT Ty = MRI.getType(Dst);
   LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
-  LLT CCVT =
-      Ty.isVector() ? LLT::vector(Ty.getElementCount(), 1) : LLT::scalar(1);
+  LLT CCVT = Ty.changeElementType(LLT::scalar(1));
 
   // Effectively we want to lower G_SDIV %lhs, %rhs, where %rhs is a power of 2,
   // to the following version:
diff --git a/llvm/lib/CodeGen/GlobalISel/LegacyLegalizerInfo.cpp b/llvm/lib/CodeGen/GlobalISel/LegacyLegalizerInfo.cpp
index 45b403bdd07658..93561b04150406 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegacyLegalizerInfo.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegacyLegalizerInfo.cpp
@@ -343,12 +343,11 @@ LegacyLegalizerInfo::findVectorLegalAction(const InstrAspect &Aspect) const {
       ScalarInVectorActions[OpcodeIdx][TypeIdx];
 
   LLT IntermediateType;
-  auto ElementSizeAndAction =
+  auto &&[ElementSize, Action1] =
       findAction(ElemSizeVec, Aspect.Type.getScalarSizeInBits());
-  IntermediateType = LLT::fixed_vector(Aspect.Type.getNumElements(),
-                                       ElementSizeAndAction.first);
-  if (ElementSizeAndAction.second != Legal)
-    return {ElementSizeAndAction.second, IntermediateType};
+  IntermediateType = Aspect.Type.changeElementType(LLT::scalar(ElementSize));
+  if (Action1 != Legal)
+    return {Action1, IntermediateType};
 
   auto i = NumElements2Actions[OpcodeIdx].find(
       IntermediateType.getScalarSizeInBits());
@@ -356,11 +355,10 @@ LegacyLegalizerInfo::findVectorLegalAction(const InstrAspect &Aspect) const {
     return {NotFound, IntermediateType};
   }
   const SizeAndActionsVec &NumElementsVec = (*i).second[TypeIdx];
-  auto NumElementsAndAction =
+  auto &&[NumElements, Action2] =
       findAction(NumElementsVec, IntermediateType.getNumElements());
-  return {NumElementsAndAction.second,
-          LLT::fixed_vector(NumElementsAndAction.first,
-                            IntermediateType.getScalarSizeInBits())};
+  return {Action2,
+          LLT::fixed_vector(NumElements, IntermediateType.getScalarType())};
 }
 
 unsigned LegacyLegalizerInfo::getOpcodeIdxForOpcode(unsigned Opcode) const {
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizeMutations.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizeMutations.cpp
index 25c1db91b05d8e..b718300537ee21 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizeMutations.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizeMutations.cpp
@@ -80,7 +80,8 @@ LegalizeMutation LegalizeMutations::widenScalarOrEltToNextPow2(unsigned TypeIdx,
     const LLT Ty = Query.Types[TypeIdx];
     unsigned NewEltSizeInBits =
         std::max(1u << Log2_32_Ceil(Ty.getScalarSizeInBits()), Min);
-    return std::make_pair(TypeIdx, Ty.changeElementSize(NewEltSizeInBits));
+    return std::make_pair(TypeIdx,
+                          Ty.changeElementType(LLT::scalar(NewEltSizeInBits)));
   };
 }
 
@@ -90,7 +91,8 @@ LegalizeMutations::widenScalarOrEltToNextMultipleOf(unsigned TypeIdx,
   return [=](const LegalityQuery &Query) {
     const LLT Ty = Query.Types[TypeIdx];
     unsigned NewEltSizeInBits = alignTo(Ty.getScalarSizeInBits(), Size);
-    return std::make_pair(TypeIdx, Ty.changeElementSize(NewEltSizeInBits));
+    return std::make_pair(TypeIdx,
+                          Ty.changeElementType(LLT::scalar(NewEltSizeInBits)));
   };
 }
 
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index e2247f76098e97..402e36a7c3d8e1 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -3138,9 +3138,8 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
       LLT VecTy = MRI.getType(VecReg);
       Observer.changingInstr(MI);
 
-      widenScalarSrc(
-          MI, LLT::vector(VecTy.getElementCount(), WideTy.getSizeInBits()), 1,
-          TargetOpcode::G_ANYEXT);
+      widenScalarSrc(MI, VecTy.changeElementType(WideTy), 1,
+                     TargetOpcode::G_ANYEXT);
 
       widenScalarDst(MI, WideTy, 0);
       Observer.changedInstr(MI);
@@ -3872,7 +3871,7 @@ LegalizerHelper::bitcastExtractSubvector(MachineInstr &MI, unsigned TypeIdx,
     return UnableToLegalize;
 
   Idx /= AdjustAmt;
-  SrcTy = LLT::vector(SrcTyEC.divideCoefficientBy(AdjustAmt), AdjustAmt);
+  SrcTy = SrcTy.divide(AdjustAmt).changeElementType(LLT::scalar(AdjustAmt));
   auto CastVec = MIRBuilder.buildBitcast(SrcTy, Src);
   auto PromotedES = MIRBuilder.buildExtractSubvector(CastTy, CastVec, Idx);
   MIRBuilder.buildBitcast(Dst, PromotedES);
@@ -3940,8 +3939,10 @@ LegalizerHelper::bitcastInsertSubvector(MachineInstr &MI, unsigned TypeIdx,
     return UnableToLegalize;
 
   Idx /= AdjustAmt;
-  BigVecTy = LLT::vector(BigVecTyEC.divideCoefficientBy(AdjustAmt), AdjustAmt);
-  SubVecTy = LLT::vector(SubVecTyEC.divideCoefficientBy(AdjustAmt), AdjustAmt);
+  BigVecTy =
+      BigVecTy.divide(AdjustAmt).changeElementType(LLT::scalar(AdjustAmt));
+  SubVecTy =
+      SubVecTy.divide(AdjustAmt).changeElementType(LLT::scalar(AdjustAmt));
   auto CastBigVec = MIRBuilder.buildBitcast(BigVecTy, BigVec);
   auto CastSubVec = MIRBuilder.buildBitcast(SubVecTy, SubVec);
   auto PromotedIS =
@@ -4689,7 +4690,8 @@ Register LegalizerHelper::getVectorElementPointer(Register VecPtr, LLT VecTy,
   const DataLayout &DL = MIRBuilder.getDataLayout();
   unsigned AS = MRI.getType(VecPtr).getAddressSpace();
   unsigned IndexSizeInBits = DL.getIndexSize(AS) * 8;
-  LLT IdxTy = MRI.getType(Index).changeElementSize(IndexSizeInBits);
+  LLT IdxTy =
+      MRI.getType(Index).changeElementType(LLT::scalar(IndexSizeInBits));
   if (IdxTy != MRI.getType(Index))
     Index = MIRBuilder.buildSExtOrTrunc(IdxTy, Index).getReg(0);
 
@@ -6940,8 +6942,9 @@ LegalizerHelper::lowerBitCount(MachineInstr &MI) {
       // If CTLZ_ZERO_UNDEF is supported, emit that and a select for zero.
       auto CtlzZU = MIRBuilder.buildCTLZ_ZERO_UNDEF(DstTy, SrcReg);
       auto ZeroSrc = MIRBuilder.buildConstant(SrcTy, 0);
-      auto ICmp = MIRBuilder.buildICmp(
-          CmpInst::ICMP_EQ, SrcTy.changeElementSize(1), SrcReg, ZeroSrc);
+      auto ICmp = MIRBuilder.buildICmp(CmpInst::ICMP_EQ,
+                                       SrcTy.changeElementType(LLT::scalar(1)),
+                                       SrcReg, ZeroSrc);
       auto LenConst = MIRBuilder.buildConstant(DstTy, Len);
       MIRBuilder.buildSelect(DstReg, ICmp, LenConst, CtlzZU);
       MI.eraseFromParent();
@@ -6988,8 +6991,9 @@ LegalizerHelper::lowerBitCount(MachineInstr &MI) {
       // zero.
       auto CttzZU = MIRBuilder.buildCTTZ_ZERO_UNDEF(DstTy, SrcReg);
       auto Zero = MIRBuilder.buildConstant(SrcTy, 0);
-      auto ICmp = MIRBuilder.buildICmp(
-          CmpInst::ICMP_EQ, DstTy.changeElementSize(1), SrcReg, Zero);
+      auto ICmp = MIRBuilder.buildICmp(CmpInst::ICMP_EQ,
+                                       DstTy.changeElementType(LLT::scalar(1)),
+                                       SrcReg, Zero);
       auto LenConst = MIRBuilder.buildConstant(DstTy, Len);
       MIRBuilder.buildSelect(DstReg, ICmp, LenConst, CttzZU);
       MI.eraseFromParent();
@@ -7234,7 +7238,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerEXT(MachineInstr &MI) {
   // The step between extend is too large, split it by creating an intermediate
   // extend instruction
   if (SrcTyScalarSize * 2 < DstTyScalarSize) {
-    LLT MidTy = SrcTy.changeElementSize(SrcTyScalarSize * 2);
+    LLT MidTy = SrcTy.changeElementType(LLT::scalar(SrcTyScalarSize * 2));
     // If the destination type is illegal, split it into multiple statements
     // zext x -> zext(merge(zext(unmerge), zext(unmerge)))
     auto NewExt = MIRBuilder.buildInstr(MI.getOpcode(), {MidTy}, {Src});
@@ -7293,16 +7297,17 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerTRUNC(MachineInstr &MI) {
     // Truncate the splits into intermediate narrower elements.
     LLT InterTy;
     if (DstTy.getScalarSizeInBits() * 2 < SrcTy.getScalarSizeInBits())
-      InterTy = SplitSrcTy.changeElementSize(DstTy.getScalarSizeInBits() * 2);
+      InterTy = SplitSrcTy.changeElementType(
+          LLT::scalar(DstTy.getScalarSizeInBits() * 2));
     else
-      InterTy = SplitSrcTy.changeElementSize(DstTy.getScalarSizeInBits());
+      InterTy = SplitSrcTy.changeElementType(DstTy.getScalarType());
     for (unsigned I = 0; I < SplitSrcs.size(); ++I) {
       SplitSrcs[I] = MIRBuilder.buildTrunc(InterTy, SplitSrcs[I]).getReg(0);
     }
 
     // Combine the new truncates into one vector
     auto Merge = MIRBuilder.buildMergeLikeInstr(
-        DstTy.changeElementSize(InterTy.getScalarSizeInBits()), SplitSrcs);
+        DstTy.changeElementType(InterTy.getScalarType()), SplitSrcs);
 
     // Truncate the new vector to the final result type
     if (DstTy.getScalarSizeInBits() * 2 < SrcTy.getScalarSizeInBits())
@@ -7729,6 +7734,8 @@ LegalizerHelper::lowerFPTOINT_SAT(MachineInstr &MI) {
   bool AreExactFloatBounds = !(MinStatus & APFloat::opStatus::opInexact) &&
                              !(MaxStatus & APFloat::opStatus::opInexact);
 
+  const LLT S1 = LLT::scalar(1);
+
   // If the integer bounds are exactly representable as floats, emit a
   // min+max+fptoi sequence. Otherwise we have to use a sequence of comparisons
   // and selects.
@@ -7736,13 +7743,13 @@ LegalizerHelper::lowerFPTOINT_SAT(MachineInstr &MI) {
     // Clamp Src by MinFloat from below. If Src is NaN the result is MinFloat.
     auto MaxC = MIRBuilder.buildFConstant(SrcTy, MinFloat);
     auto MaxP = MIRBuilder.buildFCmp(CmpInst::FCMP_ULT,
-                                     SrcTy.changeElementSize(1), Src, MaxC);
+                                     SrcTy.changeElementType(S1), Src, MaxC);
     auto Max = MIRBuilder.buildSelect(SrcTy, MaxP, Src, MaxC);
     // Clamp by MaxFloat from above. NaN cannot occur.
     auto MinC = MIRBuilder.buildFConstant(SrcTy, MaxFloat);
     auto MinP =
-        MIRBuilder.buildFCmp(CmpInst::FCMP_OGT, SrcTy.changeElementSize(1), Max,
-                             MinC, MachineInstr::FmNoNans);
+        MIRBuilder.buildFCmp(CmpInst::FCMP_OGT, SrcTy.changeElementType(S1),
+                             Max, MinC, MachineInstr::FmNoNans);
     auto Min =
         MIRBuilder.buildSelect(SrcTy, MinP, Max, MinC, MachineInstr::FmNoNans);
     // Convert clamped value to integer. In the unsigned case we're done,
@@ -7756,7 +7763,7 @@ LegalizerHelper::lowerFPTOINT_SAT(MachineInstr &MI) {
     // Otherwise, select 0 if Src is NaN.
     auto FpToInt = MIRBuilder.buildFPTOSI(DstTy, Min);
     auto IsZero = MIRBuilder.buildFCmp(CmpInst::FCMP_UNO,
-                                       DstTy.changeElementSize(1), Src, Src);
+                                       DstTy.changeElementType(S1), Src, Src);
     MIRBuilder.buildSelect(Dst, IsZero, MIRBuilder.buildConstant(DstTy, 0),
                            FpToInt);
     MI.eraseFromParent();
@@ -7772,13 +7779,13 @@ LegalizerHelper::lowerFPTOINT_SAT(MachineInstr &MI) {
   // If Src ULT MinFloat, select MinInt. In particular, this also selects
   // MinInt if Src is NaN.
   auto ULT =
-      MIRBuilder.buildFCmp(CmpInst::FCMP_ULT, SrcTy.changeElementSize(1), Src,
+      MIRBuilder.buildFCmp(CmpInst::FCMP_ULT, SrcTy.changeElementType(S1), Src,
                            MIRBuilder.buildFConstant(SrcTy, MinFloat));
   auto Max = MIRBuilder.buildSelect(
       DstTy, ULT, MIRBuilder.buildConstant(DstTy, MinInt), FpToInt);
   // If Src OGT MaxFloat, select MaxInt.
   auto OGT =
-      MIRBuilder.buildFCmp(CmpInst::FCMP_OGT, SrcTy.changeElementSize(1), Src,
+      MIRBuilder.buildFCmp(CmpInst::FCMP_OGT, SrcTy.changeElementType(S1), Src,
                            MIRBuilder.buildFConstant(SrcTy, MaxFloat));
 
   // In the unsigned case we are done, because we mapped NaN to MinInt, which
@@ -7794,7 +7801,7 @@ LegalizerHelper::lowerFPTOINT_SAT(MachineInstr &MI) {
   auto Min = MIRBuilder.buildSelect(
       DstTy, OGT, MIRBuilder.buildConstant(DstTy, MaxInt), Max);
   auto IsZero = MIRBuilder.buildFCmp(CmpInst::FCMP_UNO,
-                                     DstTy.changeElementSize(1), Src, Src);
+                                     DstTy.changeElementType(S1), Src, Src);
   MIRBuilder.buildSelect(Dst, IsZero, MIRBuilder.buildConstant(DstTy, 0), Min);
   MI.eraseFromParent();
   return Legalized;
@@ -7957,7 +7964,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerMinMax(MachineInstr &MI) {
   auto [Dst, Src0, Src1] = MI.getFirst3Regs();
 
   const CmpInst::Predicate Pred = minMaxToCompare(MI.getOpcode());
-  LLT CmpType = MRI.getType(Dst).changeElementSize(1);
+  LLT CmpType = MRI.getType(Dst).changeElementType(LLT::scalar(1));
 
   auto Cmp = MIRBuilder.buildICmp(Pred, CmpType, Src0, Src1);
   MIRBuilder.buildSelect(Dst, Cmp, Src0, Src1);
@@ -7973,7 +7980,7 @@ LegalizerHelper::lowerThreewayCompare(MachineInstr &MI) {
   Register Dst = Cmp->getReg(0);
   LLT DstTy = MRI.getType(Dst);
   LLT SrcTy = MRI.getType(Cmp->getReg(1));
-  LLT CmpTy = DstTy.changeElementSize(1);
+  LLT CmpTy = DstTy.changeElementType(LLT::scalar(1));
 
   CmpInst::Predicate LTPredicate = Cmp->isSigned()
                                        ? CmpInst::Predicate::ICMP_SLT
@@ -8102,7 +8109,7 @@ LegalizerHelper::lowerIntrinsicRound(MachineInstr &MI) {
   auto [DstReg, X] = MI.getFirst2Regs();
   const unsigned Flags = MI.getFlags();
   const LLT Ty = MRI.getType(DstReg);
-  const LLT CondTy = Ty.changeElementSize(1);
+  const LLT CondTy = Ty.changeElementType(LLT::scalar(1));
 
   // round(x) =>
   //  t = trunc(x);
@@ -8135,7 +8142,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerFFloor(MachineInstr &MI) {
   auto [DstReg, SrcReg] = MI.getFirst2Regs();
   unsigned Flags = MI.getFlags();
   LLT Ty = MRI.getType(DstReg);
-  const LLT CondTy = Ty.changeElementSize(1);
+  const LLT CondTy = Ty.changeElementType(LLT::scalar(1));
 
   // result = trunc(src);
   // if (src < 0.0 && src != result)
@@ -8382,7 +8389,8 @@ LegalizerHelper::lowerVECTOR_COMPRESS(llvm::MachineInstr &MI) {
     LastWriteVal =
         MIRBuilder.buildConstant(ValTy, PassthruSplatVal.value()).getReg(0);
   } else if (HasPassthru) {
-    auto Popcount = MIRBuilder.buildZExt(MaskTy.changeElementSize(32), Mask);
+    auto Popcount =
+        MIRBuilder.buildZExt(MaskTy.changeElementType(LLT::scalar(32)), Mask);
     Popcount = MIRBuilder.buildInstr(TargetOpcode::G_VECREDUCE_ADD,
                                      {LLT::scalar(32)}, {Popcount});
 
@@ -8762,7 +8770,7 @@ LegalizerHelper::LegalizeResult
 LegalizerHelper::lowerAddSubSatToAddoSubo(MachineInstr &MI) {
   auto [Res, LHS, RHS] = MI.getFirst3Regs();
   LLT Ty = MRI.getType(Res);
-  LLT BoolTy = Ty.changeElementSize(1);
+  LLT BoolTy = Ty.changeElementType(LLT::scalar(1));
   bool IsSigned;
   bool IsAdd;
   unsigned OverflowOp;
@@ -8832,7 +8840,7 @@ LegalizerHelper::lowerShlSat(MachineInstr &MI) {
   bool IsSigned = MI.getOpcode() == TargetOpcode::G_SSHLSAT;
   auto [Res, LHS, RHS] = MI.getFirst3Regs();
   LLT Ty = MRI.getType(Res);
-  LLT BoolTy = Ty.changeElementSize(1);
+  LLT BoolTy = Ty.changeElementType(LLT::scalar(1));
 
   unsigned BW = Ty.getScalarSizeInBits();
   auto Result = MIRBuilder.buildShl(Ty, LHS, RHS);
@@ -8987,7 +8995,7 @@ LegalizerHelper::lowerSMULH_UMULH(MachineInstr &MI) {
   Register Result = MI.getOperand(0).getReg();
   LLT OrigTy = MRI.getType(Result);
   auto SizeInBits = OrigTy.getScalarSizeInBits();
-  LLT WideTy = OrigTy.changeElementSize(SizeInBits * 2);
+  LLT WideTy = OrigTy.changeElementType(LLT::scalar(SizeInBits * 2));
 
   auto LHS = MIRBuilder.buildInstr(ExtOp, {WideTy}, {MI.getOperand(1)});
   auto RHS = MIRBuilder.buildInstr(ExtOp, {WideTy}, {MI.getOperand(2)});
diff --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
index 8c1e41ea106eca..c58472c8f58bd7 100644
--- a/llvm/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -1247,7 +1247,7 @@ LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
     return OrigTy;
 
   if (OrigTy.isVector() && TargetTy.isVector()) {
-    LLT OrigElt = OrigTy.getElementType();
+    LLT OrigElt = LLT::scalar(OrigTy.getScalarSizeInBits());
 
     // TODO: The docstring for this function says the intention is to use this
     // function to build MERGE/UNMERGE instructions. It won't be the case that
@@ -1268,7 +1268,7 @@ LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
     // Cannot produce original element type, but both have vscale in common.
     if (GCD < OrigElt.getSizeInBits())
       return LLT::scalarOrVector(ElementCount::get(1, OrigTy.isScalable()),
-                                 GCD);
+                                 LLT::scalar(GCD));
 
     return LLT::vector(
         ElementCount::get(GCD / OrigElt.getSizeInBits().getFixedValue(),
diff --git a/llvm/lib/CodeGen/LowLevelTypeUtils.cpp b/llvm/lib/CodeGen/LowLevelTypeUtils.cpp
index 936c9fbb2fff02..b580e5d3ba62a0 100644
--- a/llvm/lib/CodeGen/LowLevelTypeUtils.cpp
+++ b/llvm/lib/CodeGen/LowLevelTypeUtils.cpp
@@ -67,8 +67,9 @@ LLT llvm::getLLTForMVT(MVT Ty) {
   if (!Ty.isVector())
     return LLT::scalar(Ty.getSizeInBits());
 
-  return LLT::scalarOrVector(Ty.getVectorElementCount(),
-                             Ty.getVectorElementType().getSizeInBits());
+  return LLT::scalarOrVector(
+      Ty.getVectorElementCount(),
+      LLT::scalar(Ty.getVectorElementType().getSizeInBits()));
 }
 
 const llvm::fltSemantics &llvm::getFltSemanticForLLT(LLT Ty) {
diff --git a/llvm/lib/CodeGen/MachineOperand.cpp b/llvm/lib/CodeGen/MachineOperand.cpp
index 3a9bdde28a2e71..1003f613970a12 100644
--- a/llvm/lib/CodeGen/MachineOperand.cpp
+++ b/llvm/lib/CodeGen/MachineOperand.cpp
@@ -27,6 +27,7 @@
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/ModuleSlotTracker.h"
 #include "llvm/MC/MCDwarf.h"
+#include "llvm/Support/TypeSize.h"
 #include "llvm/Target/TargetIntrinsicInfo.h"
 #include "llvm/Target/TargetMachine.h"
 #include <optional>
@@ -1118,10 +1119,11 @@ MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, Flags F,
                                      AtomicOrdering FailureOrdering)
     : MachineMemOperand(
           ptrinfo, F,
-          !TS.hasValue() ? LLT()
-          : TS.isScalable()
-              ? LLT::scalable_vector(1, 8 * TS.getValue().getKnownMinValue())
-              : LLT::scalar(8 * TS.getValue().getKnownMinValue()),
+          !TS.hasValue()
+              ? LLT()
+              : LLT::scalarOrVector(
+                    ElementCount::get(1, TS.isScalable()),
+                    LLT::scalar(8 * TS.getValue().getKnownMinValue())),
           BaseAlignment, AAInfo, Ranges, SSID, Ordering, FailureOrdering) {}
 
 void MachineMemOperand::refineAlignment(const MachineMemOperand *MMO) {
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 28f304100326c6..f2994001ecf458 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2624,7 +2624,7 @@ bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
 
             // Disregard v2i64. Memcpy lowering produces those and splitting
             // them regresses performance on micro-benchmarks and olden/bh.
-            Ty == LLT::fixed_vector(2, 64);
+            Ty == LLT::fixed_vector(2, LLT::scalar(64));
   }
   return true;
 }
@@ -17765,7 +17765,7 @@ LLT AArch64TargetLowering::getOptimalMemOpLLT(
 
   if (CanUseNEON && Op.isMemset() && !IsSmallMemset &&
       AlignmentIsAcceptable(MVT::v2i64, Align(16)))
-    return LLT::fixed_vector(2, 64);
+    return LLT::fixed_vector(2, LLT::scalar(64));
   if (CanUseFP && !IsSmallMemset && AlignmentIsAcceptable(MVT::f128, Align(16)))
     return LLT::scalar(128);
   if (Op.size() >= 8 && AlignmentIsAcceptable(MVT::i64, Align(8)))
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index 5000078928a1d2..8592f19a818bd4 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -1911,19 +1911,19 @@ bool AArch64InstructionSelector::selectVectorSHL(MachineInstr &I,
   std::optional<int64_t> ImmVal = getVectorSHLImm(Ty, Src2Reg, MRI);
 
   unsigned Opc = 0;
-  if (Ty == LLT::fixed_vector(2, 64)) {
+  if (Ty == LLT::fixed_vector(2, LLT::scalar(64))) {
     Opc = ImmVal ? AArch64::SHLv2i64_shift : AArch64::USHLv2i64;
-  } else if (Ty == LLT::fixed_vector(4, 32)) {
+  } else if (Ty == LLT::fixed_vector(4, LLT::scalar(32))) {
     Opc = ImmVal ? AArch64::SHLv4i32_shift : AArch64::USHLv4i32;
-  } else if (Ty == LLT::fixed_vector(2, 32)) {
+  } else if (Ty == LLT::fixed_vector(2, LLT::scalar(32))) {
     Opc = ImmVal ? AArch64::SHLv2i32_shift : AArch64::USHLv2i32;
-  } else if (Ty == LLT::fixed_vector(4, 16)) {
+  } else if (Ty == LLT::fixed_vector(4, LLT::scalar(16))) {
     Opc = ImmVal ? AArch64::SHLv4i16_shift : AArch64::USHLv4i16;
-  } else if (Ty == LLT::fixed_vector(8, 16)) {
+  } else if (Ty == LLT::fixed_vector(8, LLT::scalar(16))) {
     Opc = ImmVal ? AArch64::SHLv8i16_shift : AArch64::USHLv8i16;
-  } else if (Ty == LLT::fixed_vector(16, 8)) {
+  } else if (Ty == LLT::fixed_vector(16, LLT::scalar(8))) {
     Opc = ImmVal ? AArch64::SHLv16i8_shift : AArch64::USHLv16i8;
-  } else if (Ty == LLT::fixed_vector(8, 8)) {
+  } else if (Ty == LLT::fixed_vector(8, LLT::scalar(8))) {
     Opc = ImmVal ? AArch64::SHLv8i8_shift : AArch64::USHLv8i8;
   } else {
     LLVM_DEBUG(dbgs() << "Unhandled G_SHL type");
@@ -1965,25 +1965,25 @@ bool AArch64InstructionSelector::selectVectorAshrLshr(
   unsigned NegOpc = 0;
   const TargetRegisterClass *RC =
       getRegClassForTypeOnBank(Ty, RBI.getRegBank(AArch64::FPRRegBankID));
-  if (Ty == LLT::fixed_vector(2, 64)) {
+  if (Ty == LLT::fixed_vector(2, LLT::scalar(64))) {
     Opc = IsASHR ? AArch64::SSHLv2i64 : AArch64::USHLv2i64;
     NegOpc = AArch64::NEGv2i64;
-  } else if (Ty == LLT::fixed_vector(4, 32)) {
+  } else if (Ty == LLT::fixed_vector(4, LLT::scalar(32))) {
     Opc = IsASHR ? AArch64::SSHLv4i32 : AArch64::USHLv4i32;
     NegOpc = AArch64::NEGv4i32;
-  } else if (Ty == LLT::fixed_vector(2, 32)) {
+  } else if (Ty == LLT::fixed_vector(2, LLT::scalar(32))) {
     Opc = IsASHR ? AArch64::SSHLv2i32 : AArch64::USHLv2i32;
     NegOpc = AArch64::NEGv2i32;
-  } else if (Ty == LLT::fixed_vector(4, 16)) {
+  } else if (Ty == LLT::fixed_vector(4, LLT::scalar(16))) {
     Opc = IsASHR ? AArch64::SSHLv4i16 : AArch64::USHLv4i16;
     NegOpc = AArch64::NEGv4i16;
-  } else if (Ty == LLT::fixed_vector(8, 16)) {
+  } else if (Ty == LLT::fixed_vector(8, LLT::scalar(16))) {
     Opc = IsASHR ? AArch64::SSHLv8i16 : AArch64::USHLv8i16;
     NegOpc = AArch64::NEGv8i16;
-  } else if (Ty == LLT::fixed_vector(16, 8)) {
+  } else if (Ty == LLT::fixed_vector(16, LLT::scalar(8))) {
     Opc = IsASHR ? AArch64::SSHLv16i8 : AArch64::USHLv16i8;
     NegOpc = AArch64::NEGv16i8;
-  } else if (Ty == LLT::fixed_vector(8, 8)) {
+  } else if (Ty == LLT::fixed_vector(8, LLT::scalar(8))) {
     Opc = IsASHR ? AArch64::SSHLv8i8 : AArch64::USHLv8i8;
     NegOpc = AArch64::NEGv8i8;
   } else {
@@ -2280,8 +2280,8 @@ bool AArch64InstructionSelector::convertPtrAddToAdd(
   if (PtrTy.getAddressSpace() != 0)
     return false;
 
-  const LLT CastPtrTy =
-      PtrTy.isVector() ? LLT::fixed_vector(2, 64) : LLT::scalar(64);
+  const LLT CastPtrTy = PtrTy.isVector() ? LLT::fixed_vector(2, LLT::scalar(64))
+                                         : LLT::scalar(64);
   auto PtrToInt = MIB.buildPtrToInt(CastPtrTy, AddOp1Reg);
   // Set regbanks on the registers.
   if (PtrTy.isVector())
@@ -3314,8 +3314,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
       I.setDesc(TII.get(TargetOpcode::COPY));
       return true;
     } else if (DstRB.getID() == AArch64::FPRRegBankID) {
-      if (DstTy == LLT::fixed_vector(4, 16) &&
-          SrcTy == LLT::fixed_vector(4, 32)) {
+      if (DstTy == LLT::fixed_vector(4, LLT::scalar(16)) &&
+          SrcTy == LLT::fixed_vector(4, LLT::scalar(32))) {
         I.setDesc(TII.get(AArch64::XTNv4i16));
         constrainSelectedInstRegOperands(I, TII, TRI, RBI);
         return true;
@@ -3644,13 +3644,13 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
         AArch64::GPRRegBankID)
       return false; // We expect the fpr regbank case to be imported.
     LLT VecTy = MRI.getType(I.getOperand(0).getReg());
-    if (VecTy == LLT::fixed_vector(8, 8))
+    if (VecTy == LLT::fixed_vector(8, LLT::scalar(8)))
       I.setDesc(TII.get(AArch64::DUPv8i8gpr));
-    else if (VecTy == LLT::fixed_vector(16, 8))
+    else if (VecTy == LLT::fixed_vector(16, LLT::scalar(8)))
       I.setDesc(TII.get(AArch64::DUPv16i8gpr));
-    else if (VecTy == LLT::fixed_vector(4, 16))
+    else if (VecTy == LLT::fixed_vector(4, LLT::scalar(16)))
       I.setDesc(TII.get(AArch64::DUPv4i16gpr));
-    else if (VecTy == LLT::fixed_vector(8, 16))
+    else if (VecTy == LLT::fixed_vector(8, LLT::scalar(16)))
       I.setDesc(TII.get(AArch64::DUPv8i16gpr));
     else
       return false;
@@ -4186,7 +4186,7 @@ bool AArch64InstructionSelector::selectUnmergeValues(MachineInstr &I,
     // No. We have to perform subregister inserts. For each insert, create an
     // implicit def and a subregister insert, and save the register we create.
     const TargetRegisterClass *RC = getRegClassForTypeOnBank(
-        LLT::fixed_vector(NumElts, WideTy.getScalarSizeInBits()),
+        LLT::fixed_vector(NumElts, WideTy.getScalarType()),
         *RBI.getRegBank(SrcReg, MRI, TRI));
     unsigned SubReg = 0;
     bool Found = getSubRegForClass(RC, TRI, SubReg);
@@ -6980,7 +6980,8 @@ void AArch64InstructionSelector::SelectTable(MachineInstr &I,
                                              unsigned NumVec, unsigned Opc1,
                                              unsigned Opc2, bool isExt) {
   Register DstReg = I.getOperand(0).getReg();
-  unsigned Opc = MRI.getType(DstReg) == LLT::fixed_vector(8, 8) ? Opc1 : Opc2;
+  unsigned Opc =
+      MRI.getType(DstReg) == LLT::fixed_vector(8, LLT::scalar(8)) ? Opc1 : Opc2;
 
   // Create the REG_SEQUENCE
   SmallVector<Register, 4> Regs;
@@ -7877,7 +7878,7 @@ AArch64InstructionSelector::selectExtractHigh(MachineOperand &Root) const {
     LLT SrcTy = MRI.getType(Extract->MI->getOperand(1).getReg());
     auto LaneIdx = getIConstantVRegValWithLookThrough(
         Extract->MI->getOperand(2).getReg(), MRI);
-    if (LaneIdx && SrcTy == LLT::fixed_vector(2, 64) &&
+    if (LaneIdx && SrcTy == LLT::fixed_vector(2, LLT::scalar(64)) &&
         LaneIdx->Value.getSExtValue() == 1) {
       Register ExtReg = Extract->MI->getOperand(1).getReg();
       return {{[=](MachineInstrBuilder &MIB) { MIB.addUse(ExtReg); }}};
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index f83ad7aa7460eb..3c1d30460b9f16 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -47,16 +47,16 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
   const LLT s32 = LLT::scalar(32);
   const LLT s64 = LLT::scalar(64);
   const LLT s128 = LLT::scalar(128);
-  const LLT v16s8 = LLT::fixed_vector(16, 8);
-  const LLT v8s8 = LLT::fixed_vector(8, 8);
-  const LLT v4s8 = LLT::fixed_vector(4, 8);
-  const LLT v2s8 = LLT::fixed_vector(2, 8);
-  const LLT v8s16 = LLT::fixed_vector(8, 16);
-  const LLT v4s16 = LLT::fixed_vector(4, 16);
-  const LLT v2s16 = LLT::fixed_vector(2, 16);
-  const LLT v2s32 = LLT::fixed_vector(2, 32);
-  const LLT v4s32 = LLT::fixed_vector(4, 32);
-  const LLT v2s64 = LLT::fixed_vector(2, 64);
+  const LLT v16s8 = LLT::fixed_vector(16, s8);
+  const LLT v8s8 = LLT::fixed_vector(8, s8);
+  const LLT v4s8 = LLT::fixed_vector(4, s8);
+  const LLT v2s8 = LLT::fixed_vector(2, s8);
+  const LLT v8s16 = LLT::fixed_vector(8, s16);
+  const LLT v4s16 = LLT::fixed_vector(4, s16);
+  const LLT v2s16 = LLT::fixed_vector(2, s16);
+  const LLT v2s32 = LLT::fixed_vector(2, s32);
+  const LLT v4s32 = LLT::fixed_vector(4, s32);
+  const LLT v2s64 = LLT::fixed_vector(2, s64);
   const LLT v2p0 = LLT::fixed_vector(2, p0);
 
   const LLT nxv16s8 = LLT::scalable_vector(16, s8);
@@ -1087,7 +1087,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
       .bitcastIf(isPointerVector(0), [=](const LegalityQuery &Query) {
         // Bitcast pointers vector to i64.
         const LLT DstTy = Query.Types[0];
-        return std::pair(0, LLT::vector(DstTy.getElementCount(), 64));
+        return std::pair(0, DstTy.changeElementType(LLT::scalar(64)));
       });
 
   getActionDefinitionsBuilder(G_CONCAT_VECTORS)
@@ -1101,10 +1101,8 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
             const LLT DstTy = Query.Types[0];
             const LLT SrcTy = Query.Types[1];
             return std::pair(
-                0, DstTy.changeElementSize(SrcTy.getSizeInBits())
-                       .changeElementCount(
-                           DstTy.getElementCount().divideCoefficientBy(
-                               SrcTy.getNumElements())));
+                0, DstTy.changeElementType(LLT::scalar(SrcTy.getSizeInBits()))
+                       .divide(SrcTy.getNumElements()));
           });
 
   getActionDefinitionsBuilder(G_JUMP_TABLE).legalFor({p0});
@@ -1688,10 +1686,10 @@ bool AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
 
     LLT MidTy, ExtTy;
     if (DstTy.isScalar() && DstTy.getScalarSizeInBits() <= 32) {
-      MidTy = LLT::fixed_vector(4, 32);
+      MidTy = LLT::fixed_vector(4, LLT::scalar(32));
       ExtTy = LLT::scalar(32);
     } else {
-      MidTy = LLT::fixed_vector(2, 64);
+      MidTy = LLT::fixed_vector(2, LLT::scalar(64));
       ExtTy = LLT::scalar(64);
     }
 
@@ -1864,7 +1862,7 @@ bool AArch64LegalizerInfo::legalizeLoadStore(
   }
 
   unsigned PtrSize = ValTy.getElementType().getSizeInBits();
-  const LLT NewTy = LLT::vector(ValTy.getElementCount(), PtrSize);
+  const LLT NewTy = ValTy.changeElementType(LLT::scalar(PtrSize));
   auto &MMO = **MI.memoperands_begin();
   MMO.setType(NewTy);
 
@@ -1991,7 +1989,8 @@ bool AArch64LegalizerInfo::legalizeCTPOP(MachineInstr &MI,
   // Pre-conditioning: widen Val up to the nearest vector type.
   // s32,s64,v4s16,v2s32 -> v8i8
   // v8s16,v4s32,v2s64 -> v16i8
-  LLT VTy = Size == 128 ? LLT::fixed_vector(16, 8) : LLT::fixed_vector(8, 8);
+  LLT VTy = Size == 128 ? LLT::fixed_vector(16, LLT::scalar(8))
+                        : LLT::fixed_vector(8, LLT::scalar(8));
   if (Ty.isScalar()) {
     assert((Size == 32 || Size == 64 || Size == 128) && "Expected only 32, 64, or 128 bit scalars!");
     if (Size == 32) {
@@ -2007,18 +2006,20 @@ bool AArch64LegalizerInfo::legalizeCTPOP(MachineInstr &MI,
 
   if (ST->hasDotProd() && Ty.isVector() && Ty.getNumElements() >= 2 &&
       Ty.getScalarSizeInBits() != 16) {
-    LLT Dt = Ty == LLT::fixed_vector(2, 64) ? LLT::fixed_vector(4, 32) : Ty;
+    LLT Dt = Ty == LLT::fixed_vector(2, LLT::scalar(64))
+                 ? LLT::fixed_vector(4, LLT::scalar(32))
+                 : Ty;
     auto Zeros = MIRBuilder.buildConstant(Dt, 0);
     auto Ones = MIRBuilder.buildConstant(VTy, 1);
     MachineInstrBuilder Sum;
 
-    if (Ty == LLT::fixed_vector(2, 64)) {
+    if (Ty == LLT::fixed_vector(2, LLT::scalar(64))) {
       auto UDOT =
           MIRBuilder.buildInstr(AArch64::G_UDOT, {Dt}, {Zeros, Ones, CTPOP});
       Sum = MIRBuilder.buildInstr(AArch64::G_UADDLP, {Ty}, {UDOT});
-    } else if (Ty == LLT::fixed_vector(4, 32)) {
+    } else if (Ty == LLT::fixed_vector(4, LLT::scalar(32))) {
       Sum = MIRBuilder.buildInstr(AArch64::G_UDOT, {Dt}, {Zeros, Ones, CTPOP});
-    } else if (Ty == LLT::fixed_vector(2, 32)) {
+    } else if (Ty == LLT::fixed_vector(2, LLT::scalar(32))) {
       Sum = MIRBuilder.buildInstr(AArch64::G_UDOT, {Dt}, {Zeros, Ones, CTPOP});
     } else {
       llvm_unreachable("unexpected vector shape");
@@ -2035,25 +2036,25 @@ bool AArch64LegalizerInfo::legalizeCTPOP(MachineInstr &MI,
   if (Ty.isScalar()) {
     Opc = Intrinsic::aarch64_neon_uaddlv;
     HAddTys.push_back(LLT::scalar(32));
-  } else if (Ty == LLT::fixed_vector(8, 16)) {
+  } else if (Ty == LLT::fixed_vector(8, LLT::scalar(16))) {
     Opc = Intrinsic::aarch64_neon_uaddlp;
-    HAddTys.push_back(LLT::fixed_vector(8, 16));
-  } else if (Ty == LLT::fixed_vector(4, 32)) {
+    HAddTys.push_back(LLT::fixed_vector(8, LLT::scalar(16)));
+  } else if (Ty == LLT::fixed_vector(4, LLT::scalar(32))) {
     Opc = Intrinsic::aarch64_neon_uaddlp;
-    HAddTys.push_back(LLT::fixed_vector(8, 16));
-    HAddTys.push_back(LLT::fixed_vector(4, 32));
-  } else if (Ty == LLT::fixed_vector(2, 64)) {
+    HAddTys.push_back(LLT::fixed_vector(8, LLT::scalar(16)));
+    HAddTys.push_back(LLT::fixed_vector(4, LLT::scalar(32)));
+  } else if (Ty == LLT::fixed_vector(2, LLT::scalar(64))) {
     Opc = Intrinsic::aarch64_neon_uaddlp;
-    HAddTys.push_back(LLT::fixed_vector(8, 16));
-    HAddTys.push_back(LLT::fixed_vector(4, 32));
-    HAddTys.push_back(LLT::fixed_vector(2, 64));
-  } else if (Ty == LLT::fixed_vector(4, 16)) {
+    HAddTys.push_back(LLT::fixed_vector(8, LLT::scalar(16)));
+    HAddTys.push_back(LLT::fixed_vector(4, LLT::scalar(32)));
+    HAddTys.push_back(LLT::fixed_vector(2, LLT::scalar(64)));
+  } else if (Ty == LLT::fixed_vector(4, LLT::scalar(16))) {
     Opc = Intrinsic::aarch64_neon_uaddlp;
-    HAddTys.push_back(LLT::fixed_vector(4, 16));
-  } else if (Ty == LLT::fixed_vector(2, 32)) {
+    HAddTys.push_back(LLT::fixed_vector(4, LLT::scalar(16)));
+  } else if (Ty == LLT::fixed_vector(2, LLT::scalar(32))) {
     Opc = Intrinsic::aarch64_neon_uaddlp;
-    HAddTys.push_back(LLT::fixed_vector(4, 16));
-    HAddTys.push_back(LLT::fixed_vector(2, 32));
+    HAddTys.push_back(LLT::fixed_vector(4, LLT::scalar(16)));
+    HAddTys.push_back(LLT::fixed_vector(2, LLT::scalar(32)));
   } else
     llvm_unreachable("unexpected vector shape");
   MachineInstrBuilder UADD;
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
index 28d9f4f50f3883..8b84b81889c8f5 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
@@ -388,9 +388,11 @@ bool matchCombineMulCMLT(MachineInstr &MI, MachineRegisterInfo &MRI,
                          Register &SrcReg) {
   LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
 
-  if (DstTy != LLT::fixed_vector(2, 64) && DstTy != LLT::fixed_vector(2, 32) &&
-      DstTy != LLT::fixed_vector(4, 32) && DstTy != LLT::fixed_vector(4, 16) &&
-      DstTy != LLT::fixed_vector(8, 16))
+  if (DstTy != LLT::fixed_vector(2, LLT::scalar(64)) &&
+      DstTy != LLT::fixed_vector(2, LLT::scalar(32)) &&
+      DstTy != LLT::fixed_vector(4, LLT::scalar(32)) &&
+      DstTy != LLT::fixed_vector(4, LLT::scalar(16)) &&
+      DstTy != LLT::fixed_vector(8, LLT::scalar(16)))
     return false;
 
   auto AndMI = getDefIgnoringCopies(MI.getOperand(1).getReg(), MRI);
@@ -423,9 +425,8 @@ void applyCombineMulCMLT(MachineInstr &MI, MachineRegisterInfo &MRI,
                          MachineIRBuilder &B, Register &SrcReg) {
   Register DstReg = MI.getOperand(0).getReg();
   LLT DstTy = MRI.getType(DstReg);
-  LLT HalfTy =
-      DstTy.changeElementCount(DstTy.getElementCount().multiplyCoefficientBy(2))
-          .changeElementSize(DstTy.getScalarSizeInBits() / 2);
+  LLT HalfTy = DstTy.multiplyElements(2).changeElementType(
+      LLT::scalar(DstTy.getScalarSizeInBits() / 2));
 
   Register ZeroVec = B.buildConstant(HalfTy, 0).getReg(0);
   Register CastReg =
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
index 56d70ffdece713..9c972c0ed2cf5b 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
@@ -1196,7 +1196,7 @@ bool matchExtMulToMULL(MachineInstr &MI, MachineRegisterInfo &MRI) {
       return true;
     }
     // If result type is v2s64, scalarise the instruction
-    else if (DstTy == LLT::fixed_vector(2, 64)) {
+    else if (DstTy == LLT::fixed_vector(2, LLT::scalar(64))) {
       return true;
     }
   }
@@ -1231,7 +1231,7 @@ void applyExtMulToMULL(MachineInstr &MI, MachineRegisterInfo &MRI,
     MI.eraseFromParent();
   }
   // If result type is v2s64, scalarise the instruction
-  else if (DstTy == LLT::fixed_vector(2, 64)) {
+  else if (DstTy == LLT::fixed_vector(2, LLT::scalar(64))) {
     LegalizerHelper Helper(*MI.getMF(), Observer, B);
     B.setInstrAndDebugLoc(MI);
     Helper.fewerElementsVector(
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
index 80459827c30f3f..d44aa3bdb68962 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
@@ -316,10 +316,10 @@ void applyExtAddvToUdotAddv(MachineInstr &MI, MachineRegisterInfo &MRI,
   unsigned NumOfDotMI;
   if (SrcTy.getNumElements() % 16 == 0) {
     NumOfDotMI = SrcTy.getNumElements() / 16;
-    MidTy = LLT::fixed_vector(4, 32);
+    MidTy = LLT::fixed_vector(4, LLT::scalar(32));
   } else if (SrcTy.getNumElements() % 8 == 0) {
     NumOfDotMI = SrcTy.getNumElements() / 8;
-    MidTy = LLT::fixed_vector(2, 32);
+    MidTy = LLT::fixed_vector(2, LLT::scalar(32));
   } else {
     llvm_unreachable("Source type number of elements is not multiple of 8");
   }
@@ -339,7 +339,7 @@ void applyExtAddvToUdotAddv(MachineInstr &MI, MachineRegisterInfo &MRI,
       SmallVector<Register> Leftover2;
 
       // Split the elements into v16i8 and v8i8
-      LLT MainTy = LLT::fixed_vector(16, 8);
+      LLT MainTy = LLT::fixed_vector(16, LLT::scalar(8));
       LLT LeftoverTy1, LeftoverTy2;
       if ((!extractParts(Ext1SrcReg, MRI.getType(Ext1SrcReg), MainTy,
                          LeftoverTy1, Ext1UnmergeReg, Leftover1, Builder,
@@ -351,28 +351,29 @@ void applyExtAddvToUdotAddv(MachineInstr &MI, MachineRegisterInfo &MRI,
       }
 
       // Pad the leftover v8i8 vector with register of 0s of type v8i8
-      Register v8Zeroes = Builder.buildConstant(LLT::fixed_vector(8, 8), 0)
-                              ->getOperand(0)
-                              .getReg();
+      Register v8Zeroes =
+          Builder.buildConstant(LLT::fixed_vector(8, LLT::scalar(8)), 0)
+              ->getOperand(0)
+              .getReg();
 
       Ext1UnmergeReg.push_back(
           Builder
-              .buildMergeLikeInstr(LLT::fixed_vector(16, 8),
+              .buildMergeLikeInstr(LLT::fixed_vector(16, LLT::scalar(8)),
                                    {Leftover1[0], v8Zeroes})
               .getReg(0));
       Ext2UnmergeReg.push_back(
           Builder
-              .buildMergeLikeInstr(LLT::fixed_vector(16, 8),
+              .buildMergeLikeInstr(LLT::fixed_vector(16, LLT::scalar(8)),
                                    {Leftover2[0], v8Zeroes})
               .getReg(0));
 
     } else {
       // Unmerge the source vectors to v16i8
       unsigned SrcNumElts = SrcTy.getNumElements();
-      extractParts(Ext1SrcReg, LLT::fixed_vector(16, 8), SrcNumElts / 16,
-                   Ext1UnmergeReg, Builder, MRI);
-      extractParts(Ext2SrcReg, LLT::fixed_vector(16, 8), SrcNumElts / 16,
-                   Ext2UnmergeReg, Builder, MRI);
+      extractParts(Ext1SrcReg, LLT::fixed_vector(16, LLT::scalar(8)),
+                   SrcNumElts / 16, Ext1UnmergeReg, Builder, MRI);
+      extractParts(Ext2SrcReg, LLT::fixed_vector(16, LLT::scalar(8)),
+                   SrcNumElts / 16, Ext2UnmergeReg, Builder, MRI);
     }
 
     // Build the UDOT instructions
@@ -382,10 +383,10 @@ void applyExtAddvToUdotAddv(MachineInstr &MI, MachineRegisterInfo &MRI,
       LLT ZeroesLLT;
       // Check if it is 16 or 8 elements. Set Zeroes to the according size
       if (MRI.getType(Ext1UnmergeReg[i]).getNumElements() == 16) {
-        ZeroesLLT = LLT::fixed_vector(4, 32);
+        ZeroesLLT = LLT::fixed_vector(4, LLT::scalar(32));
         NumElements += 4;
       } else {
-        ZeroesLLT = LLT::fixed_vector(2, 32);
+        ZeroesLLT = LLT::fixed_vector(2, LLT::scalar(32));
         NumElements += 2;
       }
       auto Zeroes = Builder.buildConstant(ZeroesLLT, 0)->getOperand(0).getReg();
@@ -397,8 +398,8 @@ void applyExtAddvToUdotAddv(MachineInstr &MI, MachineRegisterInfo &MRI,
     }
 
     // Merge the output
-    auto ConcatMI =
-        Builder.buildConcatVectors(LLT::fixed_vector(NumElements, 32), DotReg);
+    auto ConcatMI = Builder.buildConcatVectors(
+        LLT::fixed_vector(NumElements, LLT::scalar(32)), DotReg);
 
     // Put it through a vector reduction
     Builder.buildVecReduceAdd(MI.getOperand(0).getReg(),
@@ -468,11 +469,11 @@ void applyExtUaddvToUaddlv(MachineInstr &MI, MachineRegisterInfo &MRI,
     LLT LeftoverTy;
     SmallVector<Register, 4> LeftoverRegs;
     if (SrcScalSize == 8)
-      MainTy = LLT::fixed_vector(16, 8);
+      MainTy = LLT::fixed_vector(16, LLT::scalar(8));
     else if (SrcScalSize == 16)
-      MainTy = LLT::fixed_vector(8, 16);
+      MainTy = LLT::fixed_vector(8, LLT::scalar(16));
     else if (SrcScalSize == 32)
-      MainTy = LLT::fixed_vector(4, 32);
+      MainTy = LLT::fixed_vector(4, LLT::scalar(32));
     else
       llvm_unreachable("Source's Scalar Size not supported");
 
@@ -500,14 +501,15 @@ void applyExtUaddvToUaddlv(MachineInstr &MI, MachineRegisterInfo &MRI,
       WorkingRegisters[I] =
           B.buildInstr(std::get<1>(MatchInfo) ? TargetOpcode::G_SEXT
                                               : TargetOpcode::G_ZEXT,
-                       {LLT::fixed_vector(4, 16)}, {WorkingRegisters[I]})
+                       {LLT::fixed_vector(4, LLT::scalar(16))},
+                       {WorkingRegisters[I]})
               .getReg(0);
     }
 
     // Generate the {U/S}ADDLV instruction, whose output is always double of the
     // Src's Scalar size
-    LLT addlvTy = MidScalarSize <= 32 ? LLT::fixed_vector(4, 32)
-                                      : LLT::fixed_vector(2, 64);
+    LLT addlvTy = MidScalarSize <= 32 ? LLT::fixed_vector(4, LLT::scalar(32))
+                                      : LLT::fixed_vector(2, LLT::scalar(64));
     Register addlvReg =
         B.buildInstr(Opc, {addlvTy}, {WorkingRegisters[I]}).getReg(0);
 
@@ -586,7 +588,8 @@ void applyPushAddSubExt(MachineInstr &MI, MachineRegisterInfo &MRI,
                         MachineIRBuilder &B, bool isSExt, Register DstReg,
                         Register SrcReg1, Register SrcReg2) {
   LLT SrcTy = MRI.getType(SrcReg1);
-  LLT MidTy = SrcTy.changeElementSize(SrcTy.getScalarSizeInBits() * 2);
+  LLT MidTy =
+      SrcTy.changeElementType(LLT::scalar(SrcTy.getScalarSizeInBits() * 2));
   unsigned Opc = isSExt ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT;
   Register Ext1Reg = B.buildInstr(Opc, {MidTy}, {SrcReg1}).getReg(0);
   Register Ext2Reg = B.buildInstr(Opc, {MidTy}, {SrcReg2}).getReg(0);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.cpp
index d158f0f58d711e..14c0dedb6d0dde 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.cpp
@@ -92,7 +92,8 @@ AMDGPUFunctionArgInfo::getPreloadedValue(
   switch (Value) {
   case AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER: {
     return std::tuple(PrivateSegmentBuffer ? &PrivateSegmentBuffer : nullptr,
-                      &AMDGPU::SGPR_128RegClass, LLT::fixed_vector(4, 32));
+                      &AMDGPU::SGPR_128RegClass,
+                      LLT::fixed_vector(4, LLT::scalar(32)));
   }
   case AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR:
     return std::tuple(ImplicitBufferPtr ? &ImplicitBufferPtr : nullptr,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index bb00442342d843..180590d7450643 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -1159,8 +1159,8 @@ void AMDGPUCallLowering::handleImplicitCallArguments(
   if (!ST.enableFlatScratch()) {
     // Insert copies for the SRD. In the HSA case, this should be an identity
     // copy.
-    auto ScratchRSrcReg = MIRBuilder.buildCopy(LLT::fixed_vector(4, 32),
-                                               FuncInfo.getScratchRSrcReg());
+    auto ScratchRSrcReg = MIRBuilder.buildCopy(
+        LLT::fixed_vector(4, LLT::scalar(32)), FuncInfo.getScratchRSrcReg());
 
     auto CalleeRSrcReg = AMDGPU::isChainCC(CalleeCC)
                              ? AMDGPU::SGPR48_SGPR49_SGPR50_SGPR51
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 3be865f03df1fd..cf9711a1908fef 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -651,7 +651,7 @@ bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR(MachineInstr &MI) const {
   // Selection logic below is for V2S16 only.
   // For G_BUILD_VECTOR_TRUNC, additionally check that the operands are s32.
   Register Dst = MI.getOperand(0).getReg();
-  if (MRI->getType(Dst) != LLT::fixed_vector(2, 16) ||
+  if (MRI->getType(Dst) != LLT::fixed_vector(2, LLT::scalar(16)) ||
       (MI.getOpcode() == AMDGPU::G_BUILD_VECTOR_TRUNC &&
        SrcTy != LLT::scalar(32)))
     return selectImpl(MI, *CoverageInfo);
@@ -2347,7 +2347,8 @@ bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
     return true;
   }
 
-  if (DstTy == LLT::fixed_vector(2, 16) && SrcTy == LLT::fixed_vector(2, 32)) {
+  if (DstTy == LLT::fixed_vector(2, LLT::scalar(16)) &&
+      SrcTy == LLT::fixed_vector(2, LLT::scalar(32))) {
     MachineBasicBlock *MBB = I.getParent();
     const DebugLoc &DL = I.getDebugLoc();
 
@@ -2640,7 +2641,7 @@ static bool isExtractHiElt(MachineRegisterInfo &MRI, Register In,
     return false;
 
   assert(MRI.getType(Shuffle->getOperand(0).getReg()) ==
-         LLT::fixed_vector(2, 16));
+         LLT::fixed_vector(2, LLT::scalar(16)));
 
   ArrayRef<int> Mask = Shuffle->getOperand(3).getShuffleMask();
   assert(Mask.size() == 2);
@@ -4200,7 +4201,7 @@ AMDGPUInstructionSelector::selectVOP3PModsImpl(
   if (MI->getOpcode() == AMDGPU::G_FNEG &&
       // It's possible to see an f32 fneg here, but unlikely.
       // TODO: Treat f32 fneg as only high bit.
-      MRI.getType(Src) == LLT::fixed_vector(2, 16)) {
+      MRI.getType(Src) == LLT::fixed_vector(2, LLT::scalar(16))) {
     Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
     Src = MI->getOperand(1).getReg();
     MI = MRI.getVRegDef(Src);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 9836e10c36bc5d..0c69acfd4b8f2e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -101,9 +101,8 @@ static LegalityPredicate isWideVec16(unsigned TypeIdx) {
 static LegalizeMutation oneMoreElement(unsigned TypeIdx) {
   return [=](const LegalityQuery &Query) {
     const LLT Ty = Query.Types[TypeIdx];
-    const LLT EltTy = Ty.getElementType();
-    return std::pair(TypeIdx,
-                     LLT::fixed_vector(Ty.getNumElements() + 1, EltTy));
+    return std::pair(TypeIdx, LLT::fixed_vector(Ty.getNumElements() + 1,
+                                                Ty.getElementType()));
   };
 }
 
@@ -182,7 +181,8 @@ static LLT getBitcastRegisterType(const LLT Ty) {
     return LLT::scalar(Size);
   }
 
-  return LLT::scalarOrVector(ElementCount::getFixed(Size / 32), 32);
+  return LLT::scalarOrVector(ElementCount::getFixed(Size / 32),
+                             LLT::scalar(32));
 }
 
 static LegalizeMutation bitcastToRegisterType(unsigned TypeIdx) {
@@ -197,8 +197,9 @@ static LegalizeMutation bitcastToVectorElement32(unsigned TypeIdx) {
     const LLT Ty = Query.Types[TypeIdx];
     unsigned Size = Ty.getSizeInBits();
     assert(Size % 32 == 0);
-    return std::pair(
-        TypeIdx, LLT::scalarOrVector(ElementCount::getFixed(Size / 32), 32));
+    return std::pair(TypeIdx,
+                     LLT::scalarOrVector(ElementCount::getFixed(Size / 32),
+                                         LLT::scalar(32)));
   };
 }
 
@@ -296,43 +297,43 @@ static const LLT S512 = LLT::scalar(512);
 static const LLT S1024 = LLT::scalar(1024);
 static const LLT MaxScalar = LLT::scalar(MaxRegisterSize);
 
-static const LLT V2S8 = LLT::fixed_vector(2, 8);
-static const LLT V2S16 = LLT::fixed_vector(2, 16);
-static const LLT V4S16 = LLT::fixed_vector(4, 16);
-static const LLT V6S16 = LLT::fixed_vector(6, 16);
-static const LLT V8S16 = LLT::fixed_vector(8, 16);
-static const LLT V10S16 = LLT::fixed_vector(10, 16);
-static const LLT V12S16 = LLT::fixed_vector(12, 16);
-static const LLT V16S16 = LLT::fixed_vector(16, 16);
+static const LLT V2S8 = LLT::fixed_vector(2, S8);
+static const LLT V2S16 = LLT::fixed_vector(2, S16);
+static const LLT V4S16 = LLT::fixed_vector(4, S16);
+static const LLT V6S16 = LLT::fixed_vector(6, S16);
+static const LLT V8S16 = LLT::fixed_vector(8, S16);
+static const LLT V10S16 = LLT::fixed_vector(10, S16);
+static const LLT V12S16 = LLT::fixed_vector(12, S16);
+static const LLT V16S16 = LLT::fixed_vector(16, S16);
 
 static const LLT V2F16 = LLT::fixed_vector(2, LLT::float16());
 static const LLT V2BF16 = V2F16; // FIXME
 
-static const LLT V2S32 = LLT::fixed_vector(2, 32);
-static const LLT V3S32 = LLT::fixed_vector(3, 32);
-static const LLT V4S32 = LLT::fixed_vector(4, 32);
-static const LLT V5S32 = LLT::fixed_vector(5, 32);
-static const LLT V6S32 = LLT::fixed_vector(6, 32);
-static const LLT V7S32 = LLT::fixed_vector(7, 32);
-static const LLT V8S32 = LLT::fixed_vector(8, 32);
-static const LLT V9S32 = LLT::fixed_vector(9, 32);
-static const LLT V10S32 = LLT::fixed_vector(10, 32);
-static const LLT V11S32 = LLT::fixed_vector(11, 32);
-static const LLT V12S32 = LLT::fixed_vector(12, 32);
-static const LLT V16S32 = LLT::fixed_vector(16, 32);
-static const LLT V32S32 = LLT::fixed_vector(32, 32);
-
-static const LLT V2S64 = LLT::fixed_vector(2, 64);
-static const LLT V3S64 = LLT::fixed_vector(3, 64);
-static const LLT V4S64 = LLT::fixed_vector(4, 64);
-static const LLT V5S64 = LLT::fixed_vector(5, 64);
-static const LLT V6S64 = LLT::fixed_vector(6, 64);
-static const LLT V7S64 = LLT::fixed_vector(7, 64);
-static const LLT V8S64 = LLT::fixed_vector(8, 64);
-static const LLT V16S64 = LLT::fixed_vector(16, 64);
-
-static const LLT V2S128 = LLT::fixed_vector(2, 128);
-static const LLT V4S128 = LLT::fixed_vector(4, 128);
+static const LLT V2S32 = LLT::fixed_vector(2, S32);
+static const LLT V3S32 = LLT::fixed_vector(3, S32);
+static const LLT V4S32 = LLT::fixed_vector(4, S32);
+static const LLT V5S32 = LLT::fixed_vector(5, S32);
+static const LLT V6S32 = LLT::fixed_vector(6, S32);
+static const LLT V7S32 = LLT::fixed_vector(7, S32);
+static const LLT V8S32 = LLT::fixed_vector(8, S32);
+static const LLT V9S32 = LLT::fixed_vector(9, S32);
+static const LLT V10S32 = LLT::fixed_vector(10, S32);
+static const LLT V11S32 = LLT::fixed_vector(11, S32);
+static const LLT V12S32 = LLT::fixed_vector(12, S32);
+static const LLT V16S32 = LLT::fixed_vector(16, S32);
+static const LLT V32S32 = LLT::fixed_vector(32, S32);
+
+static const LLT V2S64 = LLT::fixed_vector(2, S64);
+static const LLT V3S64 = LLT::fixed_vector(3, S64);
+static const LLT V4S64 = LLT::fixed_vector(4, S64);
+static const LLT V5S64 = LLT::fixed_vector(5, S64);
+static const LLT V6S64 = LLT::fixed_vector(6, S64);
+static const LLT V7S64 = LLT::fixed_vector(7, S64);
+static const LLT V8S64 = LLT::fixed_vector(8, S64);
+static const LLT V16S64 = LLT::fixed_vector(16, S64);
+
+static const LLT V2S128 = LLT::fixed_vector(2, S128);
+static const LLT V4S128 = LLT::fixed_vector(4, S128);
 
 static std::initializer_list<LLT> AllScalarTypes = {
     S32, S64, S96, S128, S160, S192, S224, S256, S512, S1024};
@@ -1815,9 +1816,10 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
           const unsigned VecSize = VecTy.getSizeInBits();
 
           const unsigned TargetEltSize = DstEltSize % 64 == 0 ? 64 : 32;
+          LLT TargetEltTy = LLT::scalar(TargetEltSize);
           return std::pair(
               VecTypeIdx,
-              LLT::fixed_vector(VecSize / TargetEltSize, TargetEltSize));
+              LLT::fixed_vector(VecSize / TargetEltSize, TargetEltTy));
         })
       .clampScalar(EltTypeIdx, S32, S64)
       .clampScalar(VecTypeIdx, S32, S64)
@@ -3856,7 +3859,7 @@ bool AMDGPULegalizerInfo::legalizeBuildVector(
   Register Dst = MI.getOperand(0).getReg();
   const LLT S32 = LLT::scalar(32);
   const LLT S16 = LLT::scalar(16);
-  assert(MRI.getType(Dst) == LLT::fixed_vector(2, 16));
+  assert(MRI.getType(Dst) == LLT::fixed_vector(2, S16));
 
   Register Src0 = MI.getOperand(1).getReg();
   Register Src1 = MI.getOperand(2).getReg();
@@ -6118,7 +6121,7 @@ bool AMDGPULegalizerInfo::legalizeBufferLoad(MachineInstr &MI,
     B.setInsertPt(B.getMBB(), ++B.getInsertPt());
     B.buildTrunc(Dst, LoadDstReg);
   } else if (Unpacked && IsD16 && Ty.isVector()) {
-    LLT UnpackedTy = Ty.changeElementSize(32);
+    LLT UnpackedTy = Ty.changeElementType(S32);
     Register LoadDstReg = B.getMRI()->createGenericVirtualRegister(UnpackedTy);
     buildBufferLoad(Opc, LoadDstReg, RSrc, VIndex, VOffset, SOffset, ImmOffset,
                     Format, AuxiliaryData, MMO, IsTyped, HasVIndex, B);
@@ -6301,7 +6304,7 @@ static void packImage16bitOpsToDwords(MachineIRBuilder &B, MachineInstr &MI,
                                       const AMDGPU::ImageDimIntrinsicInfo *Intr,
                                       bool IsA16, bool IsG16) {
   const LLT S16 = LLT::scalar(16);
-  const LLT V2S16 = LLT::fixed_vector(2, 16);
+  const LLT V2S16 = LLT::fixed_vector(2, S16);
   auto EndIdx = Intr->VAddrEnd;
 
   for (unsigned I = Intr->VAddrStart; I < EndIdx; I++) {
@@ -6372,7 +6375,7 @@ static void convertImageAddrToPacked(MachineIRBuilder &B, MachineInstr &MI,
   int NumAddrRegs = AddrRegs.size();
   if (NumAddrRegs != 1) {
     auto VAddr =
-        B.buildBuildVector(LLT::fixed_vector(NumAddrRegs, 32), AddrRegs);
+        B.buildBuildVector(LLT::fixed_vector(NumAddrRegs, S32), AddrRegs);
     MI.getOperand(DimIdx).setReg(VAddr.getReg(0));
   }
 
@@ -6414,7 +6417,7 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
   MachineRegisterInfo *MRI = B.getMRI();
   const LLT S32 = LLT::scalar(32);
   const LLT S16 = LLT::scalar(16);
-  const LLT V2S16 = LLT::fixed_vector(2, 16);
+  const LLT V2S16 = LLT::fixed_vector(2, S16);
 
   unsigned DMask = 0;
   Register VData;
@@ -6530,13 +6533,13 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
     if (UsePartialNSA) {
       // Pack registers that would go over NSAMaxSize into last VAddr register
       LLT PackedAddrTy =
-          LLT::fixed_vector(2 * (PackedRegs.size() - NSAMaxSize + 1), 16);
+          LLT::fixed_vector(2 * (PackedRegs.size() - NSAMaxSize + 1), S16);
       auto Concat = B.buildConcatVectors(
           PackedAddrTy, ArrayRef(PackedRegs).slice(NSAMaxSize - 1));
       PackedRegs[NSAMaxSize - 1] = Concat.getReg(0);
       PackedRegs.resize(NSAMaxSize);
     } else if (!UseNSA && PackedRegs.size() > 1) {
-      LLT PackedAddrTy = LLT::fixed_vector(2 * PackedRegs.size(), 16);
+      LLT PackedAddrTy = LLT::fixed_vector(2 * PackedRegs.size(), S16);
       auto Concat = B.buildConcatVectors(PackedAddrTy, PackedRegs);
       PackedRegs[0] = Concat.getReg(0);
       PackedRegs.resize(1);
@@ -6645,15 +6648,15 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
 
   if (IsD16 && ST.hasUnpackedD16VMem()) {
     RoundedTy =
-        LLT::scalarOrVector(ElementCount::getFixed(AdjustedNumElts), 32);
-    TFETy = LLT::fixed_vector(AdjustedNumElts + 1, 32);
+        LLT::scalarOrVector(ElementCount::getFixed(AdjustedNumElts), S32);
+    TFETy = LLT::fixed_vector(AdjustedNumElts + 1, S32);
     RegTy = S32;
   } else {
     unsigned EltSize = EltTy.getSizeInBits();
     unsigned RoundedElts = (AdjustedTy.getSizeInBits() + 31) / 32;
     unsigned RoundedSize = 32 * RoundedElts;
-    RoundedTy = LLT::scalarOrVector(
-        ElementCount::getFixed(RoundedSize / EltSize), EltSize);
+    RoundedTy =
+        EltTy.changeElementCount(ElementCount::getFixed(RoundedSize / EltSize));
     TFETy = LLT::fixed_vector(RoundedSize / 32 + 1, S32);
     RegTy = !IsTFE && EltSize == 16 ? V2S16 : S32;
   }
@@ -6768,13 +6771,13 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
   const int RegsToCover = (Ty.getSizeInBits() + 31) / 32;
 
   // Deal with the one annoying legal case.
-  const LLT V3S16 = LLT::fixed_vector(3, 16);
+  const LLT V3S16 = LLT::fixed_vector(3, S16);
   if (Ty == V3S16) {
     if (IsTFE) {
       if (ResultRegs.size() == 1) {
         NewResultReg = ResultRegs[0];
       } else if (ResultRegs.size() == 2) {
-        LLT V4S16 = LLT::fixed_vector(4, 16);
+        LLT V4S16 = LLT::fixed_vector(4, S16);
         NewResultReg = B.buildConcatVectors(V4S16, ResultRegs).getReg(0);
       } else {
         return false;
@@ -7025,8 +7028,8 @@ bool AMDGPULegalizerInfo::legalizeBVHIntrinsic(MachineInstr &MI,
   MachineRegisterInfo &MRI = *B.getMRI();
   const LLT S16 = LLT::scalar(16);
   const LLT S32 = LLT::scalar(32);
-  const LLT V2S16 = LLT::fixed_vector(2, 16);
-  const LLT V3S32 = LLT::fixed_vector(3, 32);
+  const LLT V2S16 = LLT::fixed_vector(2, S16);
+  const LLT V3S32 = LLT::fixed_vector(3, S32);
 
   Register DstReg = MI.getOperand(0).getReg();
   Register NodePtr = MI.getOperand(2).getReg();
@@ -7151,7 +7154,7 @@ bool AMDGPULegalizerInfo::legalizeBVHIntrinsic(MachineInstr &MI,
 
   if (!UseNSA) {
     // Build a single vector containing all the operands so far prepared.
-    LLT OpTy = LLT::fixed_vector(Ops.size(), 32);
+    LLT OpTy = LLT::fixed_vector(Ops.size(), S32);
     Register MergedOps = B.buildMergeLikeInstr(OpTy, Ops).getReg(0);
     Ops.clear();
     Ops.push_back(MergedOps);
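
The scalarOrVector sites in this file rely on the overload collapsing to a
plain scalar when the element count is one. A sketch of that behavior,
mirroring the assertions in LowLevelTypeTest.cpp further down (illustrative
only):

    const LLT S32 = LLT::scalar(32);
    assert(LLT::scalarOrVector(ElementCount::getFixed(1), S32) == S32);
    assert(LLT::scalarOrVector(ElementCount::getFixed(2), S32) ==
           LLT::fixed_vector(2, S32));
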
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPreLegalizerCombiner.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPreLegalizerCombiner.cpp
index ff8189ce31f7f7..e85cf0a9402491 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPreLegalizerCombiner.cpp
@@ -186,7 +186,7 @@ void AMDGPUPreLegalizerCombinerImpl::applyClampI64ToI16(
 
   assert(MI.getOpcode() != AMDGPU::G_AMDGPU_CVT_PK_I16_I32);
 
-  const LLT V2S16 = LLT::fixed_vector(2, 16);
+  const LLT V2S16 = LLT::fixed_vector(2, LLT::scalar(16));
   auto CvtPk =
       B.buildInstr(AMDGPU::G_AMDGPU_CVT_PK_I16_I32, {V2S16},
                    {Unmerge.getReg(0), Unmerge.getReg(1)}, MI.getFlags());
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index c05f079516ba68..876989228de31d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -2505,9 +2505,12 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
       return;
     }
 
+    const LLT S16 = LLT::scalar(16);
+    const LLT S32 = LLT::scalar(32);
+
     // 16-bit operations are VALU only, but can be promoted to 32-bit SALU.
     // Packed 16-bit operations need to be scalarized and promoted.
-    if (DstTy != LLT::scalar(16) && DstTy != LLT::fixed_vector(2, 16))
+    if (DstTy != LLT::scalar(16) && DstTy != LLT::fixed_vector(2, S16))
       break;
 
     const RegisterBank *DstBank =
@@ -2515,7 +2518,6 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
     if (DstBank == &AMDGPU::VGPRRegBank)
       break;
 
-    const LLT S32 = LLT::scalar(32);
     MachineBasicBlock *MBB = MI.getParent();
     MachineFunction *MF = MBB->getParent();
     ApplyRegBankMapping ApplySALU(B, *this, MRI, &AMDGPU::SGPRRegBank);
@@ -2882,7 +2884,7 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
 
     assert(DstTy.getSizeInBits() == 64);
 
-    LLT Vec32 = LLT::fixed_vector(2 * SrcTy.getNumElements(), 32);
+    LLT Vec32 = LLT::fixed_vector(2 * SrcTy.getNumElements(), S32);
 
     auto CastSrc = B.buildBitcast(Vec32, SrcReg);
     auto One = B.buildConstant(S32, 1);
@@ -2997,7 +2999,7 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
     assert(InsTy.getSizeInBits() == 64);
 
     const LLT S32 = LLT::scalar(32);
-    LLT Vec32 = LLT::fixed_vector(2 * VecTy.getNumElements(), 32);
+    LLT Vec32 = LLT::fixed_vector(2 * VecTy.getNumElements(), S32);
 
     auto CastSrc = B.buildBitcast(Vec32, SrcReg);
     auto One = B.buildConstant(S32, 1);
@@ -4148,7 +4150,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
   case AMDGPU::G_BUILD_VECTOR:
   case AMDGPU::G_BUILD_VECTOR_TRUNC: {
     LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
-    if (DstTy == LLT::fixed_vector(2, 16)) {
+    if (DstTy == LLT::fixed_vector(2, LLT::scalar(16))) {
       unsigned DstSize = DstTy.getSizeInBits();
       unsigned SrcSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
       unsigned Src0BankID = getRegBankID(MI.getOperand(1).getReg(), MRI);
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 7da93f90341d22..afd9dc93127fda 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5658,8 +5658,8 @@ MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
 
 LLT SITargetLowering::getPreferredShiftAmountTy(LLT Ty) const {
   return (Ty.getScalarSizeInBits() <= 16 && Subtarget->has16BitInsts())
-             ? Ty.changeElementSize(16)
-             : Ty.changeElementSize(32);
+             ? Ty.changeElementType(LLT::scalar(16))
+             : Ty.changeElementType(LLT::scalar(32));
 }
 
 // Answering this is somewhat tricky and depends on the specific device which
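
changeElementType stands in for the removed changeElementSize on non-pointer
types: the element count is kept and only the element type is swapped. A
sketch mirroring the LowLevelTypeTest.cpp expectations below (illustrative
only):

    const LLT S16 = LLT::scalar(16);
    const LLT V2S32 = LLT::fixed_vector(2, LLT::scalar(32));
    assert(V2S32.changeElementType(S16) == LLT::fixed_vector(2, S16));
    assert(LLT::scalar(64).changeElementType(S16) == S16); // scalar -> new type
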
diff --git a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
index 8468dd6a22119f..239dba35ba5cca 100644
--- a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
@@ -73,10 +73,10 @@ MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) {
   const LLT s16 = LLT::scalar(16);
   const LLT s32 = LLT::scalar(32);
   const LLT s64 = LLT::scalar(64);
-  const LLT v16s8 = LLT::fixed_vector(16, 8);
-  const LLT v8s16 = LLT::fixed_vector(8, 16);
-  const LLT v4s32 = LLT::fixed_vector(4, 32);
-  const LLT v2s64 = LLT::fixed_vector(2, 64);
+  const LLT v16s8 = LLT::fixed_vector(16, s8);
+  const LLT v8s16 = LLT::fixed_vector(8, s16);
+  const LLT v4s32 = LLT::fixed_vector(4, s32);
+  const LLT v2s64 = LLT::fixed_vector(2, s64);
   const LLT p0 = LLT::pointer(0, 32);
 
   getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
diff --git a/llvm/lib/Target/PowerPC/GISel/PPCLegalizerInfo.cpp b/llvm/lib/Target/PowerPC/GISel/PPCLegalizerInfo.cpp
index afc8f6bbde1b7e..460d814906e1ac 100644
--- a/llvm/lib/Target/PowerPC/GISel/PPCLegalizerInfo.cpp
+++ b/llvm/lib/Target/PowerPC/GISel/PPCLegalizerInfo.cpp
@@ -44,10 +44,10 @@ PPCLegalizerInfo::PPCLegalizerInfo(const PPCSubtarget &ST) {
   const LLT S16 = LLT::scalar(16);
   const LLT S32 = LLT::scalar(32);
   const LLT S64 = LLT::scalar(64);
-  const LLT V16S8 = LLT::fixed_vector(16, 8);
-  const LLT V8S16 = LLT::fixed_vector(8, 16);
-  const LLT V4S32 = LLT::fixed_vector(4, 32);
-  const LLT V2S64 = LLT::fixed_vector(2, 64);
+  const LLT V16S8 = LLT::fixed_vector(16, S8);
+  const LLT V8S16 = LLT::fixed_vector(8, S16);
+  const LLT V4S32 = LLT::fixed_vector(4, S32);
+  const LLT V2S64 = LLT::fixed_vector(2, S64);
   getActionDefinitionsBuilder(G_IMPLICIT_DEF).legalFor({S64});
   getActionDefinitionsBuilder(G_CONSTANT)
       .legalFor({S32, S64})
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 21bfb444a569d4..d19149a8cdaf52 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -661,14 +661,14 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
       // We don't have the ability to slide mask vectors down indexed by their
       // i1 elements; the smallest we can do is i8. Often we are able to bitcast
       // to equivalent i8 vectors.
-      .bitcastIf(
-          all(typeIsLegalBoolVec(0, BoolVecTys, ST),
-              typeIsLegalBoolVec(1, BoolVecTys, ST), ExtractSubvecBitcastPred),
-          [=](const LegalityQuery &Query) {
-            LLT CastTy = LLT::vector(
-                Query.Types[0].getElementCount().divideCoefficientBy(8), 8);
-            return std::pair(0, CastTy);
-          })
+      .bitcastIf(all(typeIsLegalBoolVec(0, BoolVecTys, ST),
+                     typeIsLegalBoolVec(1, BoolVecTys, ST),
+                     ExtractSubvecBitcastPred),
+                 [=](const LegalityQuery &Query) {
+                   LLT Ty = Query.Types[0];
+                   LLT CastTy = Ty.divide(8).changeElementType(LLT::scalar(8));
+                   return std::pair(0, CastTy);
+                 })
       .customIf(LegalityPredicates::any(
           all(typeIsLegalBoolVec(0, BoolVecTys, ST),
               typeIsLegalBoolVec(1, BoolVecTys, ST)),
@@ -928,7 +928,7 @@ bool RISCVLegalizerInfo::legalizeLoadStore(MachineInstr &MI,
   // Calculate the new vector type with i8 elements
   unsigned NumElements =
       DataTy.getElementCount().getKnownMinValue() * (EltSizeBits / 8);
-  LLT NewDataTy = LLT::scalable_vector(NumElements, 8);
+  LLT NewDataTy = LLT::scalable_vector(NumElements, LLT::scalar(8));
 
   Helper.bitcast(MI, 0, NewDataTy);
 
@@ -1172,10 +1172,8 @@ bool RISCVLegalizerInfo::legalizeInsertSubvector(MachineInstr &MI,
     auto BigTyMinElts = BigTy.getElementCount().getKnownMinValue();
     auto LitTyMinElts = LitTy.getElementCount().getKnownMinValue();
     if (BigTyMinElts >= 8 && LitTyMinElts >= 8)
-      return Helper.bitcast(
-          IS, 0,
-          LLT::vector(BigTy.getElementCount().divideCoefficientBy(8), 8));
-
+      return Helper.bitcast(IS, 0,
+                            BigTy.divide(8).changeElementType(LLT::scalar(8)));
     // We can't slide this mask vector up indexed by its i1 elements.
     // This poses a problem when we wish to insert a scalable vector which
     // can't be re-expressed as a larger type. Just choose the slow path and
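
The divide(8).changeElementType(...) chain reproduces the old
divideCoefficientBy(8) arithmetic: divide(8) cuts the element count by a
factor of 8, and the i1 element is then replaced with s8, keeping the total
(minimum) bit width. A sketch, assuming the element count is a multiple of 8:

    // <vscale x 16 x s1> -> <vscale x 2 x s1> -> <vscale x 2 x s8>
    const LLT Mask = LLT::scalable_vector(16, LLT::scalar(1));
    const LLT Cast = Mask.divide(8).changeElementType(LLT::scalar(8));
    assert(Cast == LLT::scalable_vector(2, LLT::scalar(8)));
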
diff --git a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
index bab7fe9d25e441..93ee442220180c 100644
--- a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
+++ b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
@@ -53,26 +53,25 @@ X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI,
   const LLT s80 = LLT::scalar(80);
   const LLT s128 = LLT::scalar(128);
   const LLT sMaxScalar = Subtarget.is64Bit() ? s64 : s32;
-  const LLT v2s32 = LLT::fixed_vector(2, 32);
-  const LLT v4s8 = LLT::fixed_vector(4, 8);
+  const LLT v2s32 = LLT::fixed_vector(2, s32);
+  const LLT v4s8 = LLT::fixed_vector(4, s8);
 
-
-  const LLT v16s8 = LLT::fixed_vector(16, 8);
-  const LLT v8s16 = LLT::fixed_vector(8, 16);
-  const LLT v4s32 = LLT::fixed_vector(4, 32);
-  const LLT v2s64 = LLT::fixed_vector(2, 64);
+  const LLT v16s8 = LLT::fixed_vector(16, s8);
+  const LLT v8s16 = LLT::fixed_vector(8, s16);
+  const LLT v4s32 = LLT::fixed_vector(4, s32);
+  const LLT v2s64 = LLT::fixed_vector(2, s64);
   const LLT v2p0 = LLT::fixed_vector(2, p0);
 
-  const LLT v32s8 = LLT::fixed_vector(32, 8);
-  const LLT v16s16 = LLT::fixed_vector(16, 16);
-  const LLT v8s32 = LLT::fixed_vector(8, 32);
-  const LLT v4s64 = LLT::fixed_vector(4, 64);
+  const LLT v32s8 = LLT::fixed_vector(32, s8);
+  const LLT v16s16 = LLT::fixed_vector(16, s16);
+  const LLT v8s32 = LLT::fixed_vector(8, s32);
+  const LLT v4s64 = LLT::fixed_vector(4, s64);
   const LLT v4p0 = LLT::fixed_vector(4, p0);
 
-  const LLT v64s8 = LLT::fixed_vector(64, 8);
-  const LLT v32s16 = LLT::fixed_vector(32, 16);
-  const LLT v16s32 = LLT::fixed_vector(16, 32);
-  const LLT v8s64 = LLT::fixed_vector(8, 64);
+  const LLT v64s8 = LLT::fixed_vector(64, s8);
+  const LLT v32s16 = LLT::fixed_vector(32, s16);
+  const LLT v16s32 = LLT::fixed_vector(16, s32);
+  const LLT v8s64 = LLT::fixed_vector(8, s64);
 
   const LLT s8MaxVector = HasAVX512 ? v64s8 : HasAVX ? v32s8 : v16s8;
   const LLT s16MaxVector = HasAVX512 ? v32s16 : HasAVX ? v16s16 : v8s16;
diff --git a/llvm/unittests/CodeGen/GlobalISel/CSETest.cpp b/llvm/unittests/CodeGen/GlobalISel/CSETest.cpp
index 822707a1f4ed32..e3bb75cf676b17 100644
--- a/llvm/unittests/CodeGen/GlobalISel/CSETest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/CSETest.cpp
@@ -78,18 +78,18 @@ TEST_F(AArch64GISelMITest, TestCSE) {
   // Check G_BUILD_VECTOR
   Register Reg1 = MRI->createGenericVirtualRegister(s32);
   Register Reg2 = MRI->createGenericVirtualRegister(s32);
-  auto BuildVec1 =
-      CSEB.buildBuildVector(LLT::fixed_vector(4, 32), {Reg1, Reg2, Reg1, Reg2});
-  auto BuildVec2 =
-      CSEB.buildBuildVector(LLT::fixed_vector(4, 32), {Reg1, Reg2, Reg1, Reg2});
+  auto BuildVec1 = CSEB.buildBuildVector(LLT::fixed_vector(4, s32),
+                                         {Reg1, Reg2, Reg1, Reg2});
+  auto BuildVec2 = CSEB.buildBuildVector(LLT::fixed_vector(4, s32),
+                                         {Reg1, Reg2, Reg1, Reg2});
   EXPECT_EQ(TargetOpcode::G_BUILD_VECTOR, BuildVec1->getOpcode());
   EXPECT_EQ(TargetOpcode::G_BUILD_VECTOR, BuildVec2->getOpcode());
   EXPECT_TRUE(&*BuildVec1 == &*BuildVec2);
 
   // Check G_BUILD_VECTOR_TRUNC
-  auto BuildVecTrunc1 = CSEB.buildBuildVectorTrunc(LLT::fixed_vector(4, 16),
+  auto BuildVecTrunc1 = CSEB.buildBuildVectorTrunc(LLT::fixed_vector(4, s16),
                                                    {Reg1, Reg2, Reg1, Reg2});
-  auto BuildVecTrunc2 = CSEB.buildBuildVectorTrunc(LLT::fixed_vector(4, 16),
+  auto BuildVecTrunc2 = CSEB.buildBuildVectorTrunc(LLT::fixed_vector(4, s16),
                                                    {Reg1, Reg2, Reg1, Reg2});
   EXPECT_EQ(TargetOpcode::G_BUILD_VECTOR_TRUNC, BuildVecTrunc1->getOpcode());
   EXPECT_EQ(TargetOpcode::G_BUILD_VECTOR_TRUNC, BuildVecTrunc2->getOpcode());
diff --git a/llvm/unittests/CodeGen/GlobalISel/GISelUtilsTest.cpp b/llvm/unittests/CodeGen/GlobalISel/GISelUtilsTest.cpp
index 1ff7fd956d015d..cd144d7b0993ce 100644
--- a/llvm/unittests/CodeGen/GlobalISel/GISelUtilsTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/GISelUtilsTest.cpp
@@ -14,38 +14,65 @@ using namespace llvm;
 
 namespace {
 static const LLT S1 = LLT::scalar(1);
+static const LLT S2 = LLT::scalar(2);
+static const LLT S3 = LLT::scalar(3);
+static const LLT S4 = LLT::scalar(4);
+static const LLT S5 = LLT::scalar(5);
+static const LLT S6 = LLT::scalar(6);
 static const LLT S8 = LLT::scalar(8);
 static const LLT S16 = LLT::scalar(16);
 static const LLT S32 = LLT::scalar(32);
 static const LLT S64 = LLT::scalar(64);
 static const LLT P0 = LLT::pointer(0, 64);
 static const LLT P1 = LLT::pointer(1, 32);
+static const LLT P3 = LLT::pointer(3, 6);
+static const LLT P4 = LLT::pointer(4, 8);
 
-static const LLT V2S8 = LLT::fixed_vector(2, 8);
-static const LLT V4S8 = LLT::fixed_vector(4, 8);
-static const LLT V8S8 = LLT::fixed_vector(8, 8);
+static const LLT V4S2 = LLT::fixed_vector(4, S2);
 
-static const LLT V2S16 = LLT::fixed_vector(2, 16);
-static const LLT V3S16 = LLT::fixed_vector(3, 16);
-static const LLT V4S16 = LLT::fixed_vector(4, 16);
+static const LLT V3S4 = LLT::fixed_vector(3, S4);
+static const LLT V6S4 = LLT::fixed_vector(6, S4);
 
-static const LLT V2S32 = LLT::fixed_vector(2, 32);
-static const LLT V3S32 = LLT::fixed_vector(3, 32);
-static const LLT V4S32 = LLT::fixed_vector(4, 32);
-static const LLT V6S32 = LLT::fixed_vector(6, 32);
+static const LLT V3S5 = LLT::fixed_vector(3, S5);
 
-static const LLT V2S64 = LLT::fixed_vector(2, 64);
-static const LLT V3S64 = LLT::fixed_vector(3, 64);
-static const LLT V4S64 = LLT::fixed_vector(4, 64);
+static const LLT V2S6 = LLT::fixed_vector(2, S6);
+
+static const LLT V2S8 = LLT::fixed_vector(2, S8);
+static const LLT V3S8 = LLT::fixed_vector(3, S8);
+static const LLT V4S8 = LLT::fixed_vector(4, S8);
+static const LLT V8S8 = LLT::fixed_vector(8, S8);
+static const LLT V12S8 = LLT::fixed_vector(12, S8);
+
+static const LLT V2S16 = LLT::fixed_vector(2, S16);
+static const LLT V3S16 = LLT::fixed_vector(3, S16);
+static const LLT V4S16 = LLT::fixed_vector(4, S16);
+static const LLT V6S16 = LLT::fixed_vector(6, S16);
+
+static const LLT V2S32 = LLT::fixed_vector(2, S32);
+static const LLT V3S32 = LLT::fixed_vector(3, S32);
+static const LLT V4S32 = LLT::fixed_vector(4, S32);
+static const LLT V6S32 = LLT::fixed_vector(6, S32);
+static const LLT V12S32 = LLT::fixed_vector(12, S32);
+
+static const LLT V2S64 = LLT::fixed_vector(2, S64);
+static const LLT V3S64 = LLT::fixed_vector(3, S64);
+static const LLT V4S64 = LLT::fixed_vector(4, S64);
+static const LLT V12S64 = LLT::fixed_vector(12, S64);
 
 static const LLT V2P0 = LLT::fixed_vector(2, P0);
 static const LLT V3P0 = LLT::fixed_vector(3, P0);
 static const LLT V4P0 = LLT::fixed_vector(4, P0);
 static const LLT V6P0 = LLT::fixed_vector(6, P0);
+static const LLT V12P0 = LLT::fixed_vector(12, P0);
 
 static const LLT V2P1 = LLT::fixed_vector(2, P1);
 static const LLT V4P1 = LLT::fixed_vector(4, P1);
 
+static const LLT V2P3 = LLT::fixed_vector(2, P3);
+
+static const LLT V2P4 = LLT::fixed_vector(2, P4);
+static const LLT V3P4 = LLT::fixed_vector(3, P4);
+
 static const LLT NXV1S1 = LLT::scalable_vector(1, S1);
 static const LLT NXV2S1 = LLT::scalable_vector(2, S1);
 static const LLT NXV3S1 = LLT::scalable_vector(3, S1);
@@ -150,17 +177,12 @@ TEST(GISelUtilsTest, getGCDType) {
   EXPECT_EQ(S32, getGCDType(V2S32, V4S8));
 
   // Test cases where neither element type nicely divides.
-  EXPECT_EQ(LLT::scalar(3),
-            getGCDType(LLT::fixed_vector(3, 5), LLT::fixed_vector(2, 6)));
-  EXPECT_EQ(LLT::scalar(3),
-            getGCDType(LLT::fixed_vector(2, 6), LLT::fixed_vector(3, 5)));
+  EXPECT_EQ(S3, getGCDType(V3S5, V2S6));
+  EXPECT_EQ(S3, getGCDType(V2S6, V3S5));
 
   // Have to go smaller than a pointer element.
-  EXPECT_EQ(LLT::scalar(3), getGCDType(LLT::fixed_vector(2, LLT::pointer(3, 6)),
-                                       LLT::fixed_vector(3, 5)));
-  EXPECT_EQ(LLT::scalar(3),
-            getGCDType(LLT::fixed_vector(3, 5),
-                       LLT::fixed_vector(2, LLT::pointer(3, 6))));
+  EXPECT_EQ(S3, getGCDType(V2P3, V3S5));
+  EXPECT_EQ(S3, getGCDType(V3S5, V2P3));
 
   EXPECT_EQ(V4S8, getGCDType(V4S8, S32));
   EXPECT_EQ(S32, getGCDType(S32, V4S8));
@@ -170,19 +192,15 @@ TEST(GISelUtilsTest, getGCDType) {
   EXPECT_EQ(V2S8, getGCDType(V2S8, V4S16));
   EXPECT_EQ(S16, getGCDType(V4S16, V2S8));
 
-  EXPECT_EQ(S8, getGCDType(V2S8, LLT::fixed_vector(4, 2)));
-  EXPECT_EQ(LLT::fixed_vector(4, 2), getGCDType(LLT::fixed_vector(4, 2), S8));
+  EXPECT_EQ(S8, getGCDType(V2S8, V4S2));
+  EXPECT_EQ(V4S2, getGCDType(V4S2, S8));
 
-  EXPECT_EQ(LLT::pointer(4, 8),
-            getGCDType(LLT::fixed_vector(2, LLT::pointer(4, 8)),
-                       LLT::fixed_vector(4, 2)));
+  EXPECT_EQ(P4, getGCDType(V2P4, V4S2));
 
-  EXPECT_EQ(LLT::fixed_vector(4, 2),
-            getGCDType(LLT::fixed_vector(4, 2),
-                       LLT::fixed_vector(2, LLT::pointer(4, 8))));
+  EXPECT_EQ(V4S2, getGCDType(V4S2, V2P4));
 
-  EXPECT_EQ(LLT::scalar(4), getGCDType(LLT::fixed_vector(3, 4), S8));
-  EXPECT_EQ(LLT::scalar(4), getGCDType(S8, LLT::fixed_vector(3, 4)));
+  EXPECT_EQ(S4, getGCDType(V3S4, S8));
+  EXPECT_EQ(S4, getGCDType(S8, V3S4));
 
   // Scalable -> Scalable
   EXPECT_EQ(NXV1S1, getGCDType(NXV1S1, NXV1S32));
@@ -270,8 +288,8 @@ TEST(GISelUtilsTest, getLCMType) {
   EXPECT_EQ(V2S32, getLCMType(V2S32, V2S32));
   EXPECT_EQ(V6S32, getLCMType(V2S32, V3S32));
   EXPECT_EQ(V6S32, getLCMType(V3S32, V2S32));
-  EXPECT_EQ(LLT::fixed_vector(12, S32), getLCMType(V4S32, V3S32));
-  EXPECT_EQ(LLT::fixed_vector(12, S32), getLCMType(V3S32, V4S32));
+  EXPECT_EQ(V12S32, getLCMType(V4S32, V3S32));
+  EXPECT_EQ(V12S32, getLCMType(V3S32, V4S32));
 
   EXPECT_EQ(V2P0, getLCMType(V2P0, V2P0));
   EXPECT_EQ(V2P0, getLCMType(V2P0, P0));
@@ -279,14 +297,14 @@ TEST(GISelUtilsTest, getLCMType) {
   EXPECT_EQ(V2P0, getLCMType(V2P0, V2P0));
   EXPECT_EQ(V6P0, getLCMType(V2P0, V3P0));
   EXPECT_EQ(V6P0, getLCMType(V3P0, V2P0));
-  EXPECT_EQ(LLT::fixed_vector(12, P0), getLCMType(V4P0, V3P0));
-  EXPECT_EQ(LLT::fixed_vector(12, P0), getLCMType(V3P0, V4P0));
+  EXPECT_EQ(V12P0, getLCMType(V4P0, V3P0));
+  EXPECT_EQ(V12P0, getLCMType(V3P0, V4P0));
 
-  EXPECT_EQ(LLT::fixed_vector(12, S64), getLCMType(V4S64, V3P0));
-  EXPECT_EQ(LLT::fixed_vector(12, P0), getLCMType(V3P0, V4S64));
+  EXPECT_EQ(V12S64, getLCMType(V4S64, V3P0));
+  EXPECT_EQ(V12P0, getLCMType(V3P0, V4S64));
 
-  EXPECT_EQ(LLT::fixed_vector(12, P0), getLCMType(V4P0, V3S64));
-  EXPECT_EQ(LLT::fixed_vector(12, S64), getLCMType(V3S64, V4P0));
+  EXPECT_EQ(V12P0, getLCMType(V4P0, V3S64));
+  EXPECT_EQ(V12S64, getLCMType(V3S64, V4P0));
 
   EXPECT_EQ(V2P0, getLCMType(V2P0, S32));
   EXPECT_EQ(V4S32, getLCMType(S32, V2P0));
@@ -313,18 +331,16 @@ TEST(GISelUtilsTest, getLCMType) {
   EXPECT_EQ(V2S16, getLCMType(V2S16, V4S8));
   EXPECT_EQ(V4S8, getLCMType(V4S8, V2S16));
 
-  EXPECT_EQ(LLT::fixed_vector(6, S16), getLCMType(V3S16, V4S8));
-  EXPECT_EQ(LLT::fixed_vector(12, S8), getLCMType(V4S8, V3S16));
+  EXPECT_EQ(V6S16, getLCMType(V3S16, V4S8));
+  EXPECT_EQ(V12S8, getLCMType(V4S8, V3S16));
   EXPECT_EQ(V4S16, getLCMType(V4S16, V4S8));
   EXPECT_EQ(V8S8, getLCMType(V4S8, V4S16));
 
-  EXPECT_EQ(LLT::fixed_vector(6, 4), getLCMType(LLT::fixed_vector(3, 4), S8));
-  EXPECT_EQ(LLT::fixed_vector(3, 8), getLCMType(S8, LLT::fixed_vector(3, 4)));
+  EXPECT_EQ(V6S4, getLCMType(V3S4, S8));
+  EXPECT_EQ(V3S8, getLCMType(S8, V3S4));
 
-  EXPECT_EQ(LLT::fixed_vector(6, 4),
-            getLCMType(LLT::fixed_vector(3, 4), LLT::pointer(4, 8)));
-  EXPECT_EQ(LLT::fixed_vector(3, LLT::pointer(4, 8)),
-            getLCMType(LLT::pointer(4, 8), LLT::fixed_vector(3, 4)));
+  EXPECT_EQ(V6S4, getLCMType(V3S4, P4));
+  EXPECT_EQ(V3P4, getLCMType(P4, V3S4));
 
   EXPECT_EQ(V2S64, getLCMType(V2S64, P0));
   EXPECT_EQ(V2P0, getLCMType(P0, V2S64));
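
For the mixed-width cases above, getGCDType falls back to a scalar whose
width is the GCD of the two total bit widths: gcd(3 x 5, 2 x 6) =
gcd(15, 12) = 3, hence the S3 expectations. One worked instance (my reading
of the utility, based on these tests):

    assert(getGCDType(LLT::fixed_vector(3, LLT::scalar(5)),
                      LLT::fixed_vector(2, LLT::scalar(6))) == LLT::scalar(3));
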
diff --git a/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp b/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp
index 2f3336e9085b63..abc71db33f2daf 100644
--- a/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp
@@ -615,12 +615,12 @@ TEST_F(AArch64GISelMITest, TestVectorSignBitIsZero) {
   if (!TM)
     GTEST_SKIP();
 
-  const LLT V2S32 = LLT::fixed_vector(2, 32);
+  const LLT S32 = LLT::scalar(32);
+  const LLT V2S32 = LLT::fixed_vector(2, S32);
   // Vector buildConstant makes splat G_BUILD_VECTOR instruction.
   auto SignBit = B.buildConstant(V2S32, 0x80000000);
   auto Zero = B.buildConstant(V2S32, 0);
 
-  const LLT S32 = LLT::scalar(32);
   auto NonSplat =
       B.buildBuildVector(V2S32, {B.buildConstant(S32, 1).getReg(0),
                                  B.buildConstant(S32, 2).getReg(0)});
diff --git a/llvm/unittests/CodeGen/GlobalISel/LegalizerHelperTest.cpp b/llvm/unittests/CodeGen/GlobalISel/LegalizerHelperTest.cpp
index 0932938b209a4b..2d384994992a16 100644
--- a/llvm/unittests/CodeGen/GlobalISel/LegalizerHelperTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/LegalizerHelperTest.cpp
@@ -1381,8 +1381,9 @@ TEST_F(AArch64GISelMITest, FewerElementsAnd) {
   if (!TM)
     GTEST_SKIP();
 
-  const LLT V2S32 = LLT::fixed_vector(2, 32);
-  const LLT V5S32 = LLT::fixed_vector(5, 32);
+  const LLT S32 = LLT::scalar(32);
+  const LLT V2S32 = LLT::fixed_vector(2, S32);
+  const LLT V5S32 = LLT::fixed_vector(5, S32);
 
   // Declare your legalization info
   DefineLegalizerInfo(A, {
@@ -1428,8 +1429,8 @@ TEST_F(AArch64GISelMITest, MoreElementsAnd) {
     GTEST_SKIP();
 
   LLT s32 = LLT::scalar(32);
-  LLT v2s32 = LLT::fixed_vector(2, 32);
-  LLT v6s32 = LLT::fixed_vector(6, 32);
+  LLT v2s32 = LLT::fixed_vector(2, s32);
+  LLT v6s32 = LLT::fixed_vector(6, s32);
 
   LegalizerInfo LI;
   LI.getActionDefinitionsBuilder(TargetOpcode::G_AND)
@@ -1480,8 +1481,8 @@ TEST_F(AArch64GISelMITest, FewerElementsPhi) {
   LLT s1 = LLT::scalar(1);
   LLT s32 = LLT::scalar(32);
   LLT s64 = LLT::scalar(64);
-  LLT v2s32 = LLT::fixed_vector(2, 32);
-  LLT v5s32 = LLT::fixed_vector(5, 32);
+  LLT v2s32 = LLT::fixed_vector(2, s32);
+  LLT v5s32 = LLT::fixed_vector(5, s32);
 
   LegalizerInfo LI;
   LI.getActionDefinitionsBuilder(TargetOpcode::G_PHI)
@@ -1622,8 +1623,9 @@ TEST_F(AArch64GISelMITest, LowerMinMax) {
   if (!TM)
     GTEST_SKIP();
 
+  LLT s32 = LLT::scalar(32);
   LLT s64 = LLT::scalar(64);
-  LLT v2s32 = LLT::fixed_vector(2, 32);
+  LLT v2s32 = LLT::fixed_vector(2, s32);
 
   DefineLegalizerInfo(A, {
     getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
@@ -3213,7 +3215,7 @@ TEST_F(AArch64GISelMITest, LowerInsert) {
   LLT S64{LLT::scalar(64)};
   LLT P0{LLT::pointer(0, 64)};
   LLT P1{LLT::pointer(1, 32)};
-  LLT V2S32{LLT::fixed_vector(2, 32)};
+  LLT V2S32{LLT::fixed_vector(2, S32)};
 
   auto TruncS32 = B.buildTrunc(S32, Copies[0]);
   auto IntToPtrP0 = B.buildIntToPtr(P0, Copies[0]);
@@ -3335,9 +3337,11 @@ TEST_F(AArch64GISelMITest, LowerBSWAP) {
 
   DefineLegalizerInfo(A, {});
 
+  LLT S32{LLT::scalar(32)};
+
   // Make sure vector lowering doesn't assert.
-  auto Cast = B.buildBitcast(LLT::fixed_vector(2, 32), Copies[0]);
-  auto BSwap = B.buildBSwap(LLT::fixed_vector(2, 32), Cast);
+  auto Cast = B.buildBitcast(LLT::fixed_vector(2, S32), Copies[0]);
+  auto BSwap = B.buildBSwap(LLT::fixed_vector(2, S32), Cast);
   AInfo Info(MF->getSubtarget());
   DummyGISelObserver Observer;
   LegalizerHelper Helper(*MF, Info, Observer, B);
@@ -3527,8 +3531,9 @@ TEST_F(AArch64GISelMITest, BitcastLoad) {
     GTEST_SKIP();
 
   LLT P0 = LLT::pointer(0, 64);
+  LLT S8 = LLT::scalar(8);
   LLT S32 = LLT::scalar(32);
-  LLT V4S8 = LLT::fixed_vector(4, 8);
+  LLT V4S8 = LLT::fixed_vector(4, S8);
   auto Ptr = B.buildUndef(P0);
 
   DefineLegalizerInfo(A, {});
@@ -3561,8 +3566,9 @@ TEST_F(AArch64GISelMITest, BitcastStore) {
     GTEST_SKIP();
 
   LLT P0 = LLT::pointer(0, 64);
+  LLT S8 = LLT::scalar(8);
   LLT S32 = LLT::scalar(32);
-  LLT V4S8 = LLT::fixed_vector(4, 8);
+  LLT V4S8 = LLT::fixed_vector(4, S8);
   auto Ptr = B.buildUndef(P0);
 
   DefineLegalizerInfo(A, {});
@@ -3595,8 +3601,10 @@ TEST_F(AArch64GISelMITest, BitcastSelect) {
     GTEST_SKIP();
 
   LLT S1 = LLT::scalar(1);
+  LLT S4 = LLT::scalar(4);
+  LLT S8 = LLT::scalar(8);
   LLT S32 = LLT::scalar(32);
-  LLT V4S8 = LLT::fixed_vector(4, 8);
+  LLT V4S8 = LLT::fixed_vector(4, S8);
 
   DefineLegalizerInfo(A, {});
 
@@ -3626,14 +3634,14 @@ TEST_F(AArch64GISelMITest, BitcastSelect) {
   EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
 
   // Doesn't make sense
-  auto VCond = B.buildUndef(LLT::fixed_vector(4, 1));
+  auto VCond = B.buildUndef(LLT::fixed_vector(4, S1));
   auto VSelect = B.buildSelect(V4S8, VCond, Val0, Val1);
 
   B.setInsertPt(*EntryMBB, VSelect->getIterator());
   EXPECT_EQ(LegalizerHelper::LegalizeResult::UnableToLegalize,
             Helper.bitcast(*VSelect, 0, S32));
   EXPECT_EQ(LegalizerHelper::LegalizeResult::UnableToLegalize,
-            Helper.bitcast(*VSelect, 1, LLT::scalar(4)));
+            Helper.bitcast(*VSelect, 1, S4));
 }
 
 TEST_F(AArch64GISelMITest, BitcastBitOps) {
@@ -3641,8 +3649,9 @@ TEST_F(AArch64GISelMITest, BitcastBitOps) {
   if (!TM)
     GTEST_SKIP();
 
+  LLT S8 = LLT::scalar(8);
   LLT S32 = LLT::scalar(32);
-  LLT V4S8 = LLT::fixed_vector(4, 8);
+  LLT V4S8 = LLT::fixed_vector(4, S8);
 
   DefineLegalizerInfo(A, {});
 
@@ -3729,7 +3738,7 @@ TEST_F(AArch64GISelMITest, NarrowImplicitDef) {
   LLT S32{LLT::scalar(32)};
   LLT S48{LLT::scalar(48)};
   LLT S64{LLT::scalar(64)};
-  LLT V2S64{{LLT::fixed_vector(2, 64)}};
+  LLT V2S64{LLT::fixed_vector(2, S64)};
 
   auto Implicit1 = B.buildUndef(S64);
   auto Implicit2 = B.buildUndef(S64);
@@ -3789,10 +3798,11 @@ TEST_F(AArch64GISelMITest, WidenFreeze) {
   DefineLegalizerInfo(A, {});
 
   // Make sure that G_FREEZE is widened with anyext
+  LLT S32{LLT::scalar(32)};
   LLT S64{LLT::scalar(64)};
   LLT S128{LLT::scalar(128)};
-  LLT V2S32{LLT::fixed_vector(2, 32)};
-  LLT V2S64{LLT::fixed_vector(2, 64)};
+  LLT V2S32{LLT::fixed_vector(2, S32)};
+  LLT V2S64{LLT::fixed_vector(2, S64)};
 
   auto Vector = B.buildBitcast(V2S32, Copies[0]);
 
@@ -3839,13 +3849,14 @@ TEST_F(AArch64GISelMITest, NarrowFreeze) {
   DefineLegalizerInfo(A, {});
 
   // Make sure that G_FREEZE is narrowed using unmerge/extract
+  LLT S16{LLT::scalar(16)};
   LLT S32{LLT::scalar(32)};
   LLT S33{LLT::scalar(33)};
   LLT S48{LLT::scalar(48)};
   LLT S64{LLT::scalar(64)};
-  LLT V2S16{LLT::fixed_vector(2, 16)};
-  LLT V3S16{LLT::fixed_vector(3, 16)};
-  LLT V4S16{LLT::fixed_vector(4, 16)};
+  LLT V2S16{LLT::fixed_vector(2, S16)};
+  LLT V3S16{LLT::fixed_vector(3, S16)};
+  LLT V4S16{LLT::fixed_vector(4, S16)};
 
   auto Trunc = B.buildTrunc(S33, {Copies[0]});
   auto Trunc1 = B.buildTrunc(S48, {Copies[0]});
@@ -3922,10 +3933,11 @@ TEST_F(AArch64GISelMITest, FewerElementsFreeze) {
 
   DefineLegalizerInfo(A, {});
 
+  LLT S16{LLT::scalar(16)};
   LLT S32{LLT::scalar(32)};
-  LLT V2S16{LLT::fixed_vector(2, 16)};
-  LLT V2S32{LLT::fixed_vector(2, 32)};
-  LLT V4S16{LLT::fixed_vector(4, 16)};
+  LLT V2S16{LLT::fixed_vector(2, S16)};
+  LLT V2S32{LLT::fixed_vector(2, S32)};
+  LLT V4S16{LLT::fixed_vector(4, S16)};
 
   auto Vector1 = B.buildBitcast(V2S32, Copies[0]);
   auto Vector2 = B.buildBitcast(V4S16, Copies[0]);
@@ -3975,8 +3987,9 @@ TEST_F(AArch64GISelMITest, MoreElementsFreeze) {
 
   DefineLegalizerInfo(A, {});
 
-  LLT V2S32{LLT::fixed_vector(2, 32)};
-  LLT V4S32{LLT::fixed_vector(4, 32)};
+  LLT S32{LLT::scalar(32)};
+  LLT V2S32{LLT::fixed_vector(2, S32)};
+  LLT V4S32{LLT::fixed_vector(4, S32)};
 
   auto Vector1 = B.buildBitcast(V2S32, Copies[0]);
   auto FreezeVector1 = B.buildInstr(TargetOpcode::G_FREEZE, {V2S32}, {Vector1});
@@ -4016,9 +4029,9 @@ TEST_F(AArch64GISelMITest, FewerElementsInsertVectorElt) {
   LLT P0{LLT::pointer(0, 64)};
   LLT S64{LLT::scalar(64)};
   LLT S16{LLT::scalar(16)};
-  LLT V2S16{LLT::fixed_vector(2, 16)};
-  LLT V3S16{LLT::fixed_vector(3, 16)};
-  LLT V8S16{LLT::fixed_vector(8, 16)};
+  LLT V2S16{LLT::fixed_vector(2, S16)};
+  LLT V3S16{LLT::fixed_vector(3, S16)};
+  LLT V8S16{LLT::fixed_vector(8, S16)};
 
   auto Ptr0 = B.buildIntToPtr(P0, Copies[0]);
   auto VectorV8 = B.buildLoad(V8S16, Ptr0, MachinePointerInfo(), Align(8));
@@ -4234,9 +4247,10 @@ TEST_F(AArch64GISelMITest, MoreElementsSelect) {
     GTEST_SKIP();
 
   LLT s1 = LLT::scalar(1);
+  LLT s32 = LLT::scalar(32);
   LLT s64 = LLT::scalar(64);
-  LLT v2s1 = LLT::fixed_vector(2, 1);
-  LLT v2s32 = LLT::fixed_vector(2, 32);
+  LLT v2s1 = LLT::fixed_vector(2, s1);
+  LLT v2s32 = LLT::fixed_vector(2, s32);
 
   LegalizerInfo LI;
   DummyGISelObserver Observer;
@@ -4256,7 +4270,7 @@ TEST_F(AArch64GISelMITest, MoreElementsSelect) {
   B.setInstr(*Select);
 
   EXPECT_EQ(LegalizerHelper::LegalizeResult::UnableToLegalize,
-            Helper.moreElementsVector(*Select, 1, LLT::fixed_vector(3, 1)));
+            Helper.moreElementsVector(*Select, 1, LLT::fixed_vector(3, s1)));
   EXPECT_EQ(LegalizerHelper::LegalizeResult::Legalized,
             Helper.moreElementsVector(*Select, 1, v2s1));
 
diff --git a/llvm/unittests/CodeGen/GlobalISel/LegalizerInfoTest.cpp b/llvm/unittests/CodeGen/GlobalISel/LegalizerInfoTest.cpp
index 988e307909232a..aafddc76905722 100644
--- a/llvm/unittests/CodeGen/GlobalISel/LegalizerInfoTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/LegalizerInfoTest.cpp
@@ -95,17 +95,17 @@ TEST(LegalizerInfoTest, VectorRISC) {
   LegalizerInfo L;
   auto &LegacyInfo = L.getLegacyLegalizerInfo();
   // Typical RISCy set of operations based on ARM.
-  LegacyInfo.setAction({G_ADD, LLT::fixed_vector(8, 8)},
+  LegacyInfo.setAction({G_ADD, LLT::fixed_vector(8, LLT::scalar(8))},
                        LegacyLegalizeActions::Legal);
-  LegacyInfo.setAction({G_ADD, LLT::fixed_vector(16, 8)},
+  LegacyInfo.setAction({G_ADD, LLT::fixed_vector(16, LLT::scalar(8))},
                        LegacyLegalizeActions::Legal);
-  LegacyInfo.setAction({G_ADD, LLT::fixed_vector(4, 16)},
+  LegacyInfo.setAction({G_ADD, LLT::fixed_vector(4, LLT::scalar(16))},
                        LegacyLegalizeActions::Legal);
-  LegacyInfo.setAction({G_ADD, LLT::fixed_vector(8, 16)},
+  LegacyInfo.setAction({G_ADD, LLT::fixed_vector(8, LLT::scalar(16))},
                        LegacyLegalizeActions::Legal);
-  LegacyInfo.setAction({G_ADD, LLT::fixed_vector(2, 32)},
+  LegacyInfo.setAction({G_ADD, LLT::fixed_vector(2, LLT::scalar(32))},
                        LegacyLegalizeActions::Legal);
-  LegacyInfo.setAction({G_ADD, LLT::fixed_vector(4, 32)},
+  LegacyInfo.setAction({G_ADD, LLT::fixed_vector(4, LLT::scalar(32))},
                        LegacyLegalizeActions::Legal);
 
   LegacyInfo.setLegalizeVectorElementToDifferentSizeStrategy(
@@ -118,19 +118,24 @@ TEST(LegalizerInfoTest, VectorRISC) {
 
   // Check we infer the correct types and actually do what we're told for some
   // simple cases.
-  EXPECT_EQ(L.getAction({G_ADD, {LLT::fixed_vector(8, 8)}}),
+  EXPECT_EQ(L.getAction({G_ADD, {LLT::fixed_vector(8, LLT::scalar(8))}}),
             LegalizeActionStep(Legal, 0, LLT{}));
-  EXPECT_EQ(L.getAction({G_ADD, {LLT::fixed_vector(8, 7)}}),
-            LegalizeActionStep(WidenScalar, 0, LLT::fixed_vector(8, 8)));
-  EXPECT_EQ(L.getAction({G_ADD, {LLT::fixed_vector(2, 8)}}),
-            LegalizeActionStep(MoreElements, 0, LLT::fixed_vector(8, 8)));
-  EXPECT_EQ(L.getAction({G_ADD, {LLT::fixed_vector(8, 32)}}),
-            LegalizeActionStep(FewerElements, 0, LLT::fixed_vector(4, 32)));
+  EXPECT_EQ(
+      L.getAction({G_ADD, {LLT::fixed_vector(8, LLT::scalar(7))}}),
+      LegalizeActionStep(WidenScalar, 0, LLT::fixed_vector(8, LLT::scalar(8))));
+  EXPECT_EQ(L.getAction({G_ADD, {LLT::fixed_vector(2, LLT::scalar(8))}}),
+            LegalizeActionStep(MoreElements, 0,
+                               LLT::fixed_vector(8, LLT::scalar(8))));
+  EXPECT_EQ(L.getAction({G_ADD, {LLT::fixed_vector(8, LLT::scalar(32))}}),
+            LegalizeActionStep(FewerElements, 0,
+                               LLT::fixed_vector(4, LLT::scalar(32))));
   // Check a few non-power-of-2 sizes:
-  EXPECT_EQ(L.getAction({G_ADD, {LLT::fixed_vector(3, 3)}}),
-            LegalizeActionStep(WidenScalar, 0, LLT::fixed_vector(3, 8)));
-  EXPECT_EQ(L.getAction({G_ADD, {LLT::fixed_vector(3, 8)}}),
-            LegalizeActionStep(MoreElements, 0, LLT::fixed_vector(8, 8)));
+  EXPECT_EQ(
+      L.getAction({G_ADD, {LLT::fixed_vector(3, LLT::scalar(3))}}),
+      LegalizeActionStep(WidenScalar, 0, LLT::fixed_vector(3, LLT::scalar(8))));
+  EXPECT_EQ(L.getAction({G_ADD, {LLT::fixed_vector(3, LLT::scalar(8))}}),
+            LegalizeActionStep(MoreElements, 0,
+                               LLT::fixed_vector(8, LLT::scalar(8))));
 }
 
 TEST(LegalizerInfoTest, MultipleTypes) {
@@ -230,15 +235,15 @@ TEST(LegalizerInfoTest, RuleSets) {
   const LLT s33 = LLT::scalar(33);
   const LLT s64 = LLT::scalar(64);
 
-  const LLT v2s5 = LLT::fixed_vector(2, 5);
-  const LLT v2s8 = LLT::fixed_vector(2, 8);
-  const LLT v2s16 = LLT::fixed_vector(2, 16);
-  const LLT v2s32 = LLT::fixed_vector(2, 32);
-  const LLT v3s32 = LLT::fixed_vector(3, 32);
-  const LLT v4s32 = LLT::fixed_vector(4, 32);
-  const LLT v8s32 = LLT::fixed_vector(8, 32);
-  const LLT v2s33 = LLT::fixed_vector(2, 33);
-  const LLT v2s64 = LLT::fixed_vector(2, 64);
+  const LLT v2s5 = LLT::fixed_vector(2, s5);
+  const LLT v2s8 = LLT::fixed_vector(2, s8);
+  const LLT v2s16 = LLT::fixed_vector(2, s16);
+  const LLT v2s32 = LLT::fixed_vector(2, s32);
+  const LLT v3s32 = LLT::fixed_vector(3, s32);
+  const LLT v4s32 = LLT::fixed_vector(4, s32);
+  const LLT v8s32 = LLT::fixed_vector(8, s32);
+  const LLT v2s33 = LLT::fixed_vector(2, s33);
+  const LLT v2s64 = LLT::fixed_vector(2, s64);
 
   const LLT p0 = LLT::pointer(0, 32);
   const LLT v2p0 = LLT::fixed_vector(2, p0);
@@ -246,8 +251,8 @@ TEST(LegalizerInfoTest, RuleSets) {
   const LLT v4p0 = LLT::fixed_vector(4, p0);
 
   const LLT s1 = LLT::scalar(1);
-  const LLT v2s1 = LLT::fixed_vector(2, 1);
-  const LLT v4s1 = LLT::fixed_vector(4, 1);
+  const LLT v2s1 = LLT::fixed_vector(2, s1);
+  const LLT v4s1 = LLT::fixed_vector(4, s1);
 
   {
     LegalizerInfo LI;
@@ -420,12 +425,12 @@ TEST(LegalizerInfoTest, RuleSets) {
 
     // Raw type form
     LI.getActionDefinitionsBuilder(G_ADD)
-      .fewerElementsIf(typeIs(0, v4s32), changeElementCountTo(0, v2s32))
-      .fewerElementsIf(typeIs(0, v8s32), changeElementCountTo(0, s32))
-      .fewerElementsIf(typeIs(0, LLT::scalable_vector(4, 16)),
-                       changeElementCountTo(0, LLT::scalable_vector(2, 16)))
-      .fewerElementsIf(typeIs(0, LLT::scalable_vector(8, 16)),
-                       changeElementCountTo(0, s16));
+        .fewerElementsIf(typeIs(0, v4s32), changeElementCountTo(0, v2s32))
+        .fewerElementsIf(typeIs(0, v8s32), changeElementCountTo(0, s32))
+        .fewerElementsIf(typeIs(0, LLT::scalable_vector(4, s16)),
+                         changeElementCountTo(0, LLT::scalable_vector(2, s16)))
+        .fewerElementsIf(typeIs(0, LLT::scalable_vector(8, s16)),
+                         changeElementCountTo(0, s16));
 
     LegacyInfo.computeTables();
 
@@ -434,20 +439,20 @@ TEST(LegalizerInfoTest, RuleSets) {
     EXPECT_ACTION(MoreElements, 1, v2s1, LegalityQuery(G_SELECT, {v2s32, s1}));
     EXPECT_ACTION(MoreElements, 1, v4s1, LegalityQuery(G_SELECT, {v4p0, s1}));
 
-    EXPECT_ACTION(MoreElements, 1, LLT::scalable_vector(2, 1),
-                  LegalityQuery(G_SELECT, {LLT::scalable_vector(2, 32), s1}));
-    EXPECT_ACTION(MoreElements, 1, LLT::scalable_vector(4, 1),
-                  LegalityQuery(G_SELECT, {LLT::scalable_vector(4, 32), s1}));
+    EXPECT_ACTION(MoreElements, 1, LLT::scalable_vector(2, s1),
+                  LegalityQuery(G_SELECT, {LLT::scalable_vector(2, s32), s1}));
+    EXPECT_ACTION(MoreElements, 1, LLT::scalable_vector(4, s1),
+                  LegalityQuery(G_SELECT, {LLT::scalable_vector(4, s32), s1}));
     EXPECT_ACTION(MoreElements, 1, LLT::scalable_vector(2, s1),
                   LegalityQuery(G_SELECT, {LLT::scalable_vector(2, p0), s1}));
 
     EXPECT_ACTION(FewerElements, 0, v2s32, LegalityQuery(G_ADD, {v4s32}));
     EXPECT_ACTION(FewerElements, 0, s32, LegalityQuery(G_ADD, {v8s32}));
 
-    EXPECT_ACTION(FewerElements, 0, LLT::scalable_vector(2, 16),
-                  LegalityQuery(G_ADD, {LLT::scalable_vector(4, 16)}));
+    EXPECT_ACTION(FewerElements, 0, LLT::scalable_vector(2, s16),
+                  LegalityQuery(G_ADD, {LLT::scalable_vector(4, s16)}));
     EXPECT_ACTION(FewerElements, 0, s16,
-                  LegalityQuery(G_ADD, {LLT::scalable_vector(8, 16)}));
+                  LegalityQuery(G_ADD, {LLT::scalable_vector(8, s16)}));
   }
 
   // Test minScalarEltSameAsIf
diff --git a/llvm/unittests/CodeGen/GlobalISel/LegalizerTest.cpp b/llvm/unittests/CodeGen/GlobalISel/LegalizerTest.cpp
index 625e2c92b11193..a04821f582b0ac 100644
--- a/llvm/unittests/CodeGen/GlobalISel/LegalizerTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/LegalizerTest.cpp
@@ -33,8 +33,8 @@ ::testing::AssertionResult isNullMIPtr(const MachineInstr *MI) {
 DefineLegalizerInfo(ALegalizer, {
   auto p0 = LLT::pointer(0, 64);
   auto s8 = LLT::scalar(8);
-  auto v2s8 = LLT::fixed_vector(2, 8);
-  auto v2s16 = LLT::fixed_vector(2, 16);
+  auto v2s8 = LLT::fixed_vector(2, s8);
+  auto v2s16 = LLT::fixed_vector(2, s16);
   getActionDefinitionsBuilder(G_LOAD)
       .legalForTypesWithMemDesc({{s16, p0, s8, 8}})
       .scalarize(0)
diff --git a/llvm/unittests/CodeGen/GlobalISel/MachineIRBuilderTest.cpp b/llvm/unittests/CodeGen/GlobalISel/MachineIRBuilderTest.cpp
index c85e6d486e0acf..28b7fe0d4bd943 100644
--- a/llvm/unittests/CodeGen/GlobalISel/MachineIRBuilderTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/MachineIRBuilderTest.cpp
@@ -14,11 +14,13 @@ TEST_F(AArch64GISelMITest, TestBuildConstantFConstant) {
   if (!TM)
     GTEST_SKIP();
 
-  B.buildConstant(LLT::scalar(32), 42);
-  B.buildFConstant(LLT::scalar(32), 1.0);
+  LLT S32 = LLT::scalar(32);
+
+  B.buildConstant(S32, 42);
+  B.buildFConstant(S32, 1.0);
 
-  B.buildConstant(LLT::fixed_vector(2, 32), 99);
-  B.buildFConstant(LLT::fixed_vector(2, 32), 2.0);
+  B.buildConstant(LLT::fixed_vector(2, S32), 99);
+  B.buildFConstant(LLT::fixed_vector(2, S32), 2.0);
 
   // Test APFloat overload.
   APFloat KVal(APFloat::IEEEdouble(), "4.0");
@@ -51,21 +53,21 @@ TEST_F(AArch64GISelMITest, TestBuildConstantFConstantDeath) {
   // Test APInt version breaks
   EXPECT_DEATH(B.buildConstant(LLT::scalar(16), APV32),
                "creating constant with the wrong size");
-  EXPECT_DEATH(B.buildConstant(LLT::fixed_vector(2, 16), APV32),
+  EXPECT_DEATH(B.buildConstant(LLT::fixed_vector(2, LLT::scalar(16)), APV32),
                "creating constant with the wrong size");
 
   // Test ConstantInt version breaks
   ConstantInt *CI = ConstantInt::get(Ctx, APV32);
   EXPECT_DEATH(B.buildConstant(LLT::scalar(16), *CI),
                "creating constant with the wrong size");
-  EXPECT_DEATH(B.buildConstant(LLT::fixed_vector(2, 16), *CI),
+  EXPECT_DEATH(B.buildConstant(LLT::fixed_vector(2, LLT::scalar(16)), *CI),
                "creating constant with the wrong size");
 
   APFloat DoubleVal(APFloat::IEEEdouble());
   ConstantFP *CF = ConstantFP::get(Ctx, DoubleVal);
   EXPECT_DEATH(B.buildFConstant(LLT::scalar(16), *CF),
                "creating fconstant with the wrong size");
-  EXPECT_DEATH(B.buildFConstant(LLT::fixed_vector(2, 16), *CF),
+  EXPECT_DEATH(B.buildFConstant(LLT::fixed_vector(2, LLT::scalar(16)), *CF),
                "creating fconstant with the wrong size");
 }
 
@@ -337,11 +339,11 @@ TEST_F(AArch64GISelMITest, BuildMergeLikeInstr) {
   // G_MERGE_VALUES.
   B.buildMergeLikeInstr(LLT::scalar(128), {RegC0, RegC1, RegC2, RegC3});
   // Merging plain constants to a vector should produce a G_BUILD_VECTOR.
-  LLT V2x32 = LLT::fixed_vector(2, 32);
+  LLT V2x32 = LLT::fixed_vector(2, S32);
   Register RegC0C1 = B.buildMergeLikeInstr(V2x32, {RegC0, RegC1}).getReg(0);
   Register RegC2C3 = B.buildMergeLikeInstr(V2x32, {RegC2, RegC3}).getReg(0);
   // Merging vector constants to a vector should produce a G_CONCAT_VECTORS.
-  B.buildMergeLikeInstr(LLT::fixed_vector(4, 32), {RegC0C1, RegC2C3});
+  B.buildMergeLikeInstr(LLT::fixed_vector(4, S32), {RegC0C1, RegC2C3});
   // Merging vector constants to a plain type is not allowed.
   // Nothing else to test.
 
@@ -376,7 +378,7 @@ TEST_F(MachineIRBuilderDeathTest, BuildMergeValues) {
   B.buildMergeValues(LLT::scalar(128), {RegC0, RegC1, RegC2, RegC3});
 
   // Using a vector destination type should assert.
-  LLT V2x32 = LLT::fixed_vector(2, 32);
+  LLT V2x32 = LLT::fixed_vector(2, S32);
   EXPECT_DEBUG_DEATH(
       B.buildMergeValues(V2x32, {RegC0, RegC1}),
       "vectors should be built with G_CONCAT_VECTOR or G_BUILD_VECTOR");
@@ -486,13 +488,15 @@ TEST_F(AArch64GISelMITest, BuildExtractSubvector) {
   if (!TM)
     GTEST_SKIP();
 
-  LLT VecTy = LLT::fixed_vector(4, 32);
-  LLT SubVecTy = LLT::fixed_vector(2, 32);
+  LLT S32 = LLT::scalar(32);
+
+  LLT VecTy = LLT::fixed_vector(4, S32);
+  LLT SubVecTy = LLT::fixed_vector(2, S32);
   auto Vec = B.buildUndef(VecTy);
   B.buildExtractSubvector(SubVecTy, Vec, 0);
 
-  VecTy = LLT::scalable_vector(4, 32);
-  SubVecTy = LLT::scalable_vector(2, 32);
+  VecTy = LLT::scalable_vector(4, S32);
+  SubVecTy = LLT::scalable_vector(2, S32);
   Vec = B.buildUndef(VecTy);
   B.buildExtractSubvector(SubVecTy, Vec, 0);
 
diff --git a/llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp b/llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp
index 59a86fa5646f36..1b05c9e2156979 100644
--- a/llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp
@@ -511,7 +511,7 @@ TEST_F(AArch64GISelMITest, MatchSpecificType) {
                        m_GAdd(m_SpecificType(s64), m_Reg())));
 
   // Try to match the destination type of a bitcast.
-  LLT v2s32 = LLT::fixed_vector(2, 32);
+  LLT v2s32 = LLT::fixed_vector(2, s32);
   auto MIBCast = B.buildCast(v2s32, Copies[0]);
   EXPECT_TRUE(
       mi_match(MIBCast.getReg(0), *MRI, m_GBitcast(m_Reg())));
@@ -724,8 +724,8 @@ TEST_F(AArch64GISelMITest, MatchConstantSplat) {
     GTEST_SKIP();
 
   LLT s64 = LLT::scalar(64);
-  LLT v2s64 = LLT::fixed_vector(2, 64);
-  LLT v4s64 = LLT::fixed_vector(4, 64);
+  LLT v2s64 = LLT::fixed_vector(2, s64);
+  LLT v4s64 = LLT::fixed_vector(4, s64);
 
   Register FPOne = B.buildFConstant(s64, 1.0).getReg(0);
   Register FPZero = B.buildFConstant(s64, 0.0).getReg(0);
diff --git a/llvm/unittests/CodeGen/LowLevelTypeTest.cpp b/llvm/unittests/CodeGen/LowLevelTypeTest.cpp
index 43aa4009897eeb..125fc71bebab00 100644
--- a/llvm/unittests/CodeGen/LowLevelTypeTest.cpp
+++ b/llvm/unittests/CodeGen/LowLevelTypeTest.cpp
@@ -79,7 +79,7 @@ TEST(LowLevelTypeTest, Vector) {
           ElementCount::getScalable(3), ElementCount::getScalable(4),
           ElementCount::getScalable(32), ElementCount::getScalable(0xff)}) {
       const LLT STy = LLT::scalar(S);
-      const LLT VTy = LLT::vector(EC, S);
+      const LLT VTy = LLT::vector(EC, STy);
 
       // Test the alternative vector().
       {
@@ -128,16 +128,16 @@ TEST(LowLevelTypeTest, Vector) {
 TEST(LowLevelTypeTest, ScalarOrVector) {
-  // Test version with number of bits for scalar type.
+  // Test with an LLT for the scalar type, including the scalable case.
   EXPECT_EQ(LLT::scalar(32),
-            LLT::scalarOrVector(ElementCount::getFixed(1), 32));
-  EXPECT_EQ(LLT::fixed_vector(2, 32),
-            LLT::scalarOrVector(ElementCount::getFixed(2), 32));
-  EXPECT_EQ(LLT::scalable_vector(1, 32),
-            LLT::scalarOrVector(ElementCount::getScalable(1), 32));
+            LLT::scalarOrVector(ElementCount::getFixed(1), LLT::scalar(32)));
+  EXPECT_EQ(LLT::fixed_vector(2, LLT::scalar(32)),
+            LLT::scalarOrVector(ElementCount::getFixed(2), LLT::scalar(32)));
+  EXPECT_EQ(LLT::scalable_vector(1, LLT::scalar(32)),
+            LLT::scalarOrVector(ElementCount::getScalable(1), LLT::scalar(32)));
 
   // Test version with LLT for scalar type.
   EXPECT_EQ(LLT::scalar(32),
             LLT::scalarOrVector(ElementCount::getFixed(1), LLT::scalar(32)));
-  EXPECT_EQ(LLT::fixed_vector(2, 32),
+  EXPECT_EQ(LLT::fixed_vector(2, LLT::scalar(32)),
             LLT::scalarOrVector(ElementCount::getFixed(2), LLT::scalar(32)));
 
   // Test with pointer elements.
@@ -155,8 +155,8 @@ TEST(LowLevelTypeTest, ChangeElementType) {
   const LLT S32 = LLT::scalar(32);
   const LLT S64 = LLT::scalar(64);
 
-  const LLT V2S32 = LLT::fixed_vector(2, 32);
-  const LLT V2S64 = LLT::fixed_vector(2, 64);
+  const LLT V2S32 = LLT::fixed_vector(2, LLT::scalar(32));
+  const LLT V2S64 = LLT::fixed_vector(2, LLT::scalar(64));
 
   const LLT V2P0 = LLT::fixed_vector(2, P0);
   const LLT V2P1 = LLT::fixed_vector(2, P1);
@@ -164,14 +164,14 @@ TEST(LowLevelTypeTest, ChangeElementType) {
   EXPECT_EQ(S64, S32.changeElementType(S64));
   EXPECT_EQ(S32, S32.changeElementType(S32));
 
-  EXPECT_EQ(S32, S64.changeElementSize(32));
-  EXPECT_EQ(S32, S32.changeElementSize(32));
+  EXPECT_EQ(S32, S64.changeElementType(S32));
+  EXPECT_EQ(S32, S32.changeElementType(S32));
 
   EXPECT_EQ(V2S64, V2S32.changeElementType(S64));
   EXPECT_EQ(V2S32, V2S64.changeElementType(S32));
 
-  EXPECT_EQ(V2S64, V2S32.changeElementSize(64));
-  EXPECT_EQ(V2S32, V2S64.changeElementSize(32));
+  EXPECT_EQ(V2S64, V2S32.changeElementType(S64));
+  EXPECT_EQ(V2S32, V2S64.changeElementType(S32));
 
   EXPECT_EQ(P0, S32.changeElementType(P0));
   EXPECT_EQ(S32, P0.changeElementType(S32));
@@ -180,8 +180,8 @@ TEST(LowLevelTypeTest, ChangeElementType) {
   EXPECT_EQ(V2S32, V2P0.changeElementType(S32));
 
   // Similar tests for scalable vectors.
-  const LLT NXV2S32 = LLT::scalable_vector(2, 32);
-  const LLT NXV2S64 = LLT::scalable_vector(2, 64);
+  const LLT NXV2S32 = LLT::scalable_vector(2, S32);
+  const LLT NXV2S64 = LLT::scalable_vector(2, S64);
 
   const LLT NXV2P0 = LLT::scalable_vector(2, P0);
   const LLT NXV2P1 = LLT::scalable_vector(2, P1);
@@ -189,8 +189,8 @@ TEST(LowLevelTypeTest, ChangeElementType) {
   EXPECT_EQ(NXV2S64, NXV2S32.changeElementType(S64));
   EXPECT_EQ(NXV2S32, NXV2S64.changeElementType(S32));
 
-  EXPECT_EQ(NXV2S64, NXV2S32.changeElementSize(64));
-  EXPECT_EQ(NXV2S32, NXV2S64.changeElementSize(32));
+  EXPECT_EQ(NXV2S64, NXV2S32.changeElementType(S64));
+  EXPECT_EQ(NXV2S32, NXV2S64.changeElementType(S32));
 
   EXPECT_EQ(NXV2P1, NXV2P0.changeElementType(P1));
   EXPECT_EQ(NXV2S32, NXV2P0.changeElementType(S32));
@@ -202,8 +202,8 @@ TEST(LowLevelTypeTest, ChangeNumElements) {
   const LLT V3P0 = LLT::fixed_vector(3, P0);
 
   const LLT S64 = LLT::scalar(64);
-  const LLT V2S64 = LLT::fixed_vector(2, 64);
-  const LLT V3S64 = LLT::fixed_vector(3, 64);
+  const LLT V2S64 = LLT::fixed_vector(2, S64);
+  const LLT V3S64 = LLT::fixed_vector(3, S64);
 
   // Vector to scalar
   EXPECT_EQ(S64, V2S64.changeElementCount(ElementCount::getFixed(1)));
@@ -218,8 +218,8 @@ TEST(LowLevelTypeTest, ChangeNumElements) {
   EXPECT_EQ(V3P0, V2P0.changeElementCount(ElementCount::getFixed(3)));
   EXPECT_EQ(V2P0, P0.changeElementCount(ElementCount::getFixed(2)));
 
-  const LLT NXV2S64 = LLT::scalable_vector(2, 64);
-  const LLT NXV3S64 = LLT::scalable_vector(3, 64);
+  const LLT NXV2S64 = LLT::scalable_vector(2, S64);
+  const LLT NXV3S64 = LLT::scalable_vector(3, S64);
   const LLT NXV2P0 = LLT::scalable_vector(2, P0);
 
   // Scalable vector to scalar
@@ -242,19 +242,21 @@ TEST(LowLevelTypeTest, ChangeNumElements) {
 
 // Invalid to directly change the element size for pointers.
 TEST(LowLevelTypeTest, ChangeElementTypeDeath) {
+  const LLT S32 = LLT::scalar(32);
+  const LLT S64 = LLT::scalar(64);
   const LLT P0 = LLT::pointer(0, 32);
   const LLT V2P0 = LLT::fixed_vector(2, P0);
 
-  EXPECT_DEATH(P0.changeElementSize(64),
-               "invalid to directly change element size for pointers");
-  EXPECT_DEATH(V2P0.changeElementSize(64),
-               "invalid to directly change element size for pointers");
+  EXPECT_DEATH(P0.changeElementType(S64),
+               "invalid to directly change element type for pointers");
+  EXPECT_DEATH(V2P0.changeElementType(S64),
+               "invalid to directly change element type for pointers");
 
-  // Make sure this still fails even without a change in size.
-  EXPECT_DEATH(P0.changeElementSize(32),
-               "invalid to directly change element size for pointers");
-  EXPECT_DEATH(V2P0.changeElementSize(32),
-               "invalid to directly change element size for pointers");
+  // Make sure this still fails even without a change in type.
+  EXPECT_DEATH(P0.changeElementType(S32),
+               "invalid to directly change element type for pointers");
+  EXPECT_DEATH(V2P0.changeElementType(S32),
+               "invalid to directly change element type for pointers");
 }
 
 #endif
@@ -333,8 +335,9 @@ TEST(LowLevelTypeTest, Divide) {
   EXPECT_EQ(LLT::scalar(32), LLT::pointer(0, 64).divide(2));
 
   // Test dividing vectors.
-  EXPECT_EQ(LLT::scalar(32), LLT::fixed_vector(2, 32).divide(2));
-  EXPECT_EQ(LLT::fixed_vector(2, 32), LLT::fixed_vector(4, 32).divide(2));
+  EXPECT_EQ(LLT::scalar(32), LLT::fixed_vector(2, LLT::scalar(32)).divide(2));
+  EXPECT_EQ(LLT::fixed_vector(2, LLT::scalar(32)),
+            LLT::fixed_vector(4, LLT::scalar(32)).divide(2));
 
   // Test vector of pointers
   EXPECT_EQ(LLT::pointer(1, 64),
@@ -345,16 +348,20 @@ TEST(LowLevelTypeTest, Divide) {
 
 TEST(LowLevelTypeTest, MultiplyElements) {
   // Basic scalar->vector cases
-  EXPECT_EQ(LLT::fixed_vector(2, 16), LLT::scalar(16).multiplyElements(2));
-  EXPECT_EQ(LLT::fixed_vector(3, 16), LLT::scalar(16).multiplyElements(3));
-  EXPECT_EQ(LLT::fixed_vector(4, 32), LLT::scalar(32).multiplyElements(4));
-  EXPECT_EQ(LLT::fixed_vector(4, 7), LLT::scalar(7).multiplyElements(4));
+  EXPECT_EQ(LLT::fixed_vector(2, LLT::scalar(16)),
+            LLT::scalar(16).multiplyElements(2));
+  EXPECT_EQ(LLT::fixed_vector(3, LLT::scalar(16)),
+            LLT::scalar(16).multiplyElements(3));
+  EXPECT_EQ(LLT::fixed_vector(4, LLT::scalar(32)),
+            LLT::scalar(32).multiplyElements(4));
+  EXPECT_EQ(LLT::fixed_vector(4, LLT::scalar(7)),
+            LLT::scalar(7).multiplyElements(4));
 
   // Basic vector to vector cases
-  EXPECT_EQ(LLT::fixed_vector(4, 32),
-            LLT::fixed_vector(2, 32).multiplyElements(2));
-  EXPECT_EQ(LLT::fixed_vector(9, 32),
-            LLT::fixed_vector(3, 32).multiplyElements(3));
+  EXPECT_EQ(LLT::fixed_vector(4, LLT::scalar(32)),
+            LLT::fixed_vector(2, LLT::scalar(32)).multiplyElements(2));
+  EXPECT_EQ(LLT::fixed_vector(9, LLT::scalar(32)),
+            LLT::fixed_vector(3, LLT::scalar(32)).multiplyElements(3));
 
   // Pointer to vector of pointers
   EXPECT_EQ(LLT::fixed_vector(2, LLT::pointer(0, 32)),
@@ -371,16 +378,16 @@ TEST(LowLevelTypeTest, MultiplyElements) {
             LLT::fixed_vector(3, LLT::pointer(1, 32)).multiplyElements(3));
 
   // Scalable vectors
-  EXPECT_EQ(LLT::scalable_vector(4, 16),
-            LLT::scalable_vector(2, 16).multiplyElements(2));
-  EXPECT_EQ(LLT::scalable_vector(6, 16),
-            LLT::scalable_vector(2, 16).multiplyElements(3));
-  EXPECT_EQ(LLT::scalable_vector(9, 16),
-            LLT::scalable_vector(3, 16).multiplyElements(3));
-  EXPECT_EQ(LLT::scalable_vector(4, 32),
-            LLT::scalable_vector(2, 32).multiplyElements(2));
-  EXPECT_EQ(LLT::scalable_vector(256, 32),
-            LLT::scalable_vector(8, 32).multiplyElements(32));
+  EXPECT_EQ(LLT::scalable_vector(4, LLT::scalar(16)),
+            LLT::scalable_vector(2, LLT::scalar(16)).multiplyElements(2));
+  EXPECT_EQ(LLT::scalable_vector(6, LLT::scalar(16)),
+            LLT::scalable_vector(2, LLT::scalar(16)).multiplyElements(3));
+  EXPECT_EQ(LLT::scalable_vector(9, LLT::scalar(16)),
+            LLT::scalable_vector(3, LLT::scalar(16)).multiplyElements(3));
+  EXPECT_EQ(LLT::scalable_vector(4, LLT::scalar(32)),
+            LLT::scalable_vector(2, LLT::scalar(32)).multiplyElements(2));
+  EXPECT_EQ(LLT::scalable_vector(256, LLT::scalar(32)),
+            LLT::scalable_vector(8, LLT::scalar(32)).multiplyElements(32));
 
   // Scalable vectors of pointers
   EXPECT_EQ(LLT::scalable_vector(4, LLT::pointer(0, 32)),
@@ -391,8 +398,8 @@ TEST(LowLevelTypeTest, MultiplyElements) {
 
 constexpr LLT CELLT = LLT();
 constexpr LLT CES32 = LLT::scalar(32);
-constexpr LLT CEV2S32 = LLT::fixed_vector(2, 32);
-constexpr LLT CESV2S32 = LLT::scalable_vector(2, 32);
+constexpr LLT CEV2S32 = LLT::fixed_vector(2, LLT::scalar(32));
+constexpr LLT CESV2S32 = LLT::scalable_vector(2, LLT::scalar(32));
 constexpr LLT CEP0 = LLT::pointer(0, 32);
 constexpr LLT CEV2P1 = LLT::fixed_vector(2, LLT::pointer(1, 64));
 
@@ -412,42 +419,44 @@ static_assert(CEV2P1.getScalarType() == LLT::pointer(1, 64));
 static_assert(CES32.getScalarType() == CES32);
 static_assert(CEV2S32.getScalarType() == CES32);
 static_assert(CEV2S32.changeElementType(CEP0) == LLT::fixed_vector(2, CEP0));
-static_assert(CEV2S32.changeElementSize(16) == LLT::fixed_vector(2, 16));
+static_assert(CEV2S32.changeElementType(LLT::scalar(16)) ==
+              LLT::fixed_vector(2, LLT::scalar(16)));
 static_assert(CEV2S32.changeElementCount(ElementCount::getFixed(4)) ==
-              LLT::fixed_vector(4, 32));
+              LLT::fixed_vector(4, LLT::scalar(32)));
 static_assert(CES32.isByteSized());
 static_assert(!LLT::scalar(7).isByteSized());
 static_assert(CES32.getScalarSizeInBits() == 32);
 static_assert(CEP0.getAddressSpace() == 0);
 static_assert(LLT::pointer(1, 64).getAddressSpace() == 1);
-static_assert(CEV2S32.multiplyElements(2) == LLT::fixed_vector(4, 32));
+static_assert(CEV2S32.multiplyElements(2) ==
+              LLT::fixed_vector(4, LLT::scalar(32)));
 static_assert(CEV2S32.divide(2) == LLT::scalar(32));
 static_assert(LLT::scalarOrVector(ElementCount::getFixed(1), LLT::scalar(32)) ==
               LLT::scalar(32));
 static_assert(LLT::scalarOrVector(ElementCount::getFixed(2), LLT::scalar(32)) ==
-              LLT::fixed_vector(2, 32));
+              LLT::fixed_vector(2, LLT::scalar(32)));
 static_assert(LLT::scalarOrVector(ElementCount::getFixed(2), CEP0) ==
               LLT::fixed_vector(2, CEP0));
 
 TEST(LowLevelTypeTest, ConstExpr) {
   EXPECT_EQ(LLT(), CELLT);
   EXPECT_EQ(LLT::scalar(32), CES32);
-  EXPECT_EQ(LLT::fixed_vector(2, 32), CEV2S32);
+  EXPECT_EQ(LLT::fixed_vector(2, LLT::scalar(32)), CEV2S32);
   EXPECT_EQ(LLT::pointer(0, 32), CEP0);
-  EXPECT_EQ(LLT::scalable_vector(2, 32), CESV2S32);
+  EXPECT_EQ(LLT::scalable_vector(2, LLT::scalar(32)), CESV2S32);
 }
 
 TEST(LowLevelTypeTest, IsFixedVector) {
   EXPECT_FALSE(LLT::scalar(32).isFixedVector());
-  EXPECT_TRUE(LLT::fixed_vector(2, 32).isFixedVector());
-  EXPECT_FALSE(LLT::scalable_vector(2, 32).isFixedVector());
-  EXPECT_FALSE(LLT::scalable_vector(1, 32).isFixedVector());
+  EXPECT_TRUE(LLT::fixed_vector(2, LLT::scalar(32)).isFixedVector());
+  EXPECT_FALSE(LLT::scalable_vector(2, LLT::scalar(32)).isFixedVector());
+  EXPECT_FALSE(LLT::scalable_vector(1, LLT::scalar(32)).isFixedVector());
 }
 
 TEST(LowLevelTypeTest, IsScalableVector) {
   EXPECT_FALSE(LLT::scalar(32).isScalableVector());
-  EXPECT_FALSE(LLT::fixed_vector(2, 32).isScalableVector());
-  EXPECT_TRUE(LLT::scalable_vector(2, 32).isScalableVector());
-  EXPECT_TRUE(LLT::scalable_vector(1, 32).isScalableVector());
+  EXPECT_FALSE(LLT::fixed_vector(2, LLT::scalar(32)).isScalableVector());
+  EXPECT_TRUE(LLT::scalable_vector(2, LLT::scalar(32)).isScalableVector());
+  EXPECT_TRUE(LLT::scalable_vector(1, LLT::scalar(32)).isScalableVector());
 }
 }
diff --git a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp
index 15ec7e17130de4..8773a71e1df5ce 100644
--- a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp
+++ b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp
@@ -376,8 +376,10 @@ void LLTCodeGen::emitCxxConstructorCall(raw_ostream &OS) const {
     OS << "LLT::vector("
        << (Ty.isScalable() ? "ElementCount::getScalable("
                            : "ElementCount::getFixed(")
-       << Ty.getElementCount().getKnownMinValue() << "), "
-       << Ty.getScalarSizeInBits() << ")";
+       << Ty.getElementCount().getKnownMinValue() << "), ";
+    LLTCodeGen ScalarTy = LLTCodeGen(Ty.getElementType());
+    ScalarTy.emitCxxConstructorCall(OS);
+    OS << ")";
     return;
   }
   if (Ty.isPointer() && Ty.getSizeInBits() > 0) {
@@ -430,8 +432,8 @@ std::optional<LLTCodeGen> MVTToLLT(MVT::SimpleValueType SVT) {
   MVT VT(SVT);
 
   if (VT.isVector() && !VT.getVectorElementCount().isScalar())
-    return LLTCodeGen(
-        LLT::vector(VT.getVectorElementCount(), VT.getScalarSizeInBits()));
+    return LLTCodeGen(LLT::vector(VT.getVectorElementCount(),
+                                  LLT::scalar(VT.getScalarSizeInBits())));
 
   if (VT.isInteger() || VT.isFloatingPoint())
     return LLTCodeGen(LLT::scalar(VT.getSizeInBits()));

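A note on the GlobalISelMatchTable.cpp hunk above: emitCxxConstructorCall now prints the
element type by recursing into LLTCodeGen instead of printing a raw bit width. Under that
reading, the emitted constructor call for a fixed 4 x s32 vector should look roughly like
the following (a sketch of the expected output, not captured from an actual TableGen run):

    LLT::vector(ElementCount::getFixed(4), LLT::scalar(32))

and since the recursion can also hit the pointer branch, vectors of pointers can be emitted
as, e.g., LLT::vector(ElementCount::getFixed(2), LLT::pointer(1, 64)).
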
>From 0330df24833a31d11cc9262bec5b09bc97afeeb5 Mon Sep 17 00:00:00 2001
From: Tim Gymnich <tim at brium.ai>
Date: Mon, 16 Dec 2024 20:02:53 +0000
Subject: [PATCH 2/4] fix GCD type

---
 llvm/lib/CodeGen/GlobalISel/Utils.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
index c58472c8f58bd7..370d62dc7eb90c 100644
--- a/llvm/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -1247,7 +1247,7 @@ LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
     return OrigTy;
 
   if (OrigTy.isVector() && TargetTy.isVector()) {
-    LLT OrigElt = LLT::scalar(OrigTy.getScalarSizeInBits());
+    LLT OrigElt = OrigTy.getElementType();
 
     // TODO: The docstring for this function says the intention is to use this
     // function to build MERGE/UNMERGE instructions. It won't be the case that

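The one-line fix above is more than cosmetic: for a vector of pointers, reconstructing the
element as LLT::scalar(getScalarSizeInBits()) drops the pointer type, while getElementType()
preserves it. A minimal sketch of the difference (illustrative values only, not taken from
the test suite):

    LLT P0   = LLT::pointer(0, 64);
    LLT V4P0 = LLT::fixed_vector(4, P0);
    LLT Old  = LLT::scalar(V4P0.getScalarSizeInBits()); // s64: pointer-ness lost
    LLT New  = V4P0.getElementType();                   // p0: preserved
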
>From 7fae8f1dd7a5f6e2e5780124fdc66c4dc890b1d8 Mon Sep 17 00:00:00 2001
From: Tim Gymnich <tim at brium.ai>
Date: Mon, 16 Dec 2024 20:03:11 +0000
Subject: [PATCH 3/4] remove obsolete test

---
 llvm/unittests/CodeGen/LowLevelTypeTest.cpp | 25 ---------------------
 1 file changed, 25 deletions(-)

diff --git a/llvm/unittests/CodeGen/LowLevelTypeTest.cpp b/llvm/unittests/CodeGen/LowLevelTypeTest.cpp
index 125fc71bebab00..6ecd66c49e83a1 100644
--- a/llvm/unittests/CodeGen/LowLevelTypeTest.cpp
+++ b/llvm/unittests/CodeGen/LowLevelTypeTest.cpp
@@ -237,31 +237,6 @@ TEST(LowLevelTypeTest, ChangeNumElements) {
   EXPECT_EQ(NXV2P0, P0.changeElementCount(ElementCount::getScalable(2)));
 }
 
-#ifdef GTEST_HAS_DEATH_TEST
-#ifndef NDEBUG
-
-// Invalid to directly change the element size for pointers.
-TEST(LowLevelTypeTest, ChangeElementTypeDeath) {
-  const LLT S32 = LLT::scalar(32);
-  const LLT S64 = LLT::scalar(64);
-  const LLT P0 = LLT::pointer(0, 32);
-  const LLT V2P0 = LLT::fixed_vector(2, P0);
-
-  EXPECT_DEATH(P0.changeElementType(S64),
-               "invalid to directly change element type for pointers");
-  EXPECT_DEATH(V2P0.changeElementType(S64),
-               "invalid to directly change element type for pointers");
-
-  // Make sure this still fails even without a change in type.
-  EXPECT_DEATH(P0.changeElementType(S32),
-               "invalid to directly change element type for pointers");
-  EXPECT_DEATH(V2P0.changeElementType(S32),
-               "invalid to directly change element type for pointers");
-}
-
-#endif
-#endif
-
 TEST(LowLevelTypeTest, Pointer) {
   LLVMContext C;
   DataLayout DL("p64:64:64-p127:512:512:512-p16777215:65528:8");

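Removing the death test is consistent with the rest of LowLevelTypeTest.cpp: the
ChangeElementType test updated in the first patch already exercises pointer element changes
as valid operations, e.g.

    EXPECT_EQ(P0, S32.changeElementType(P0));
    EXPECT_EQ(NXV2P1, NXV2P0.changeElementType(P1));

so a test expecting changeElementType to assert on pointers would contradict them, which is
presumably why the commit message calls it obsolete.
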
>From 2ee10a5893f6386d7459970924ed63f683cde513 Mon Sep 17 00:00:00 2001
From: Tim Gymnich <tim at brium.ai>
Date: Mon, 16 Dec 2024 20:03:23 +0000
Subject: [PATCH 4/4] fix typo

---
 llvm/unittests/CodeGen/GlobalISel/GISelUtilsTest.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/unittests/CodeGen/GlobalISel/GISelUtilsTest.cpp b/llvm/unittests/CodeGen/GlobalISel/GISelUtilsTest.cpp
index cd144d7b0993ce..1fe40bd4c8622a 100644
--- a/llvm/unittests/CodeGen/GlobalISel/GISelUtilsTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/GISelUtilsTest.cpp
@@ -33,7 +33,7 @@ static const LLT V4S2 = LLT::fixed_vector(4, S2);
 static const LLT V3S4 = LLT::fixed_vector(3, S4);
 static const LLT V6S4 = LLT::fixed_vector(6, S4);
 
-static const LLT V3S5 = LLT::fixed_vector(2, S5);
+static const LLT V3S5 = LLT::fixed_vector(3, S5);
 
 static const LLT V2S6 = LLT::fixed_vector(2, S6);
 
@@ -339,7 +339,7 @@ TEST(GISelUtilsTest, getLCMType) {
   EXPECT_EQ(V6S4, getLCMType(V3S4, S8));
   EXPECT_EQ(V3S8, getLCMType(S8, V3S4));
 
-  EXPECT_EQ(V6S4, getLCMType(V3S4, V4S8));
+  EXPECT_EQ(V6S4, getLCMType(V3S4, P4));
   EXPECT_EQ(V3P4, getLCMType(P4, V3S4));
 
   EXPECT_EQ(V2S64, getLCMType(V2S64, P0));

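Taken together, the series leaves only the LLT-taking element initializers. A representative
migration, mirroring the unittest updates above (a sketch only; the variable names are
illustrative):

    const LLT S64 = LLT::scalar(64);
    // Removed overloads (element given as a bit width):
    //   LLT::fixed_vector(2, 64);
    //   LLT::scalable_vector(2, 32);
    // Remaining overloads (element given as an LLT):
    const LLT V2S64   = LLT::fixed_vector(2, S64);
    const LLT NXV2S32 = LLT::scalable_vector(2, LLT::scalar(32));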