[llvm] [GISEL][RISCV] IRTranslator for scalable vector load (PR #80006)

Jiahan Xie via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 28 09:33:34 PST 2024


https://github.com/jiahanxie353 updated https://github.com/llvm/llvm-project/pull/80006

>From 584c14f7cdaf4f8b999d078dc152c46f814df3e9 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Sun, 14 Jan 2024 22:29:10 -0500
Subject: [PATCH 1/7] GISel support for G_LOAD is in progress

---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index dde1882f5eea83..d8f58079135f50 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -20850,7 +20850,8 @@ bool RISCVTargetLowering::fallBackToDAGISel(const Instruction &Inst) const {
   unsigned Op = Inst.getOpcode();
   if (Op == Instruction::Add || Op == Instruction::Sub ||
       Op == Instruction::And || Op == Instruction::Or ||
-      Op == Instruction::Xor || Op == Instruction::InsertElement)
+      Op == Instruction::Xor || Op == Instruction::InsertElement ||
+      Op == Instruction::Load)
     return false;
 
   if (Inst.getType()->isScalableTy())

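For context: for a load instruction, Inst.getType() is the loaded value type, so a load of <vscale x 8 x i8> previously reached the isScalableTy() check and forced a fallback to DAGISel. A minimal sketch of the relaxed gate (simplified here; the real hook keeps the full opcode list):

  #include "llvm/IR/Instruction.h"
  #include "llvm/IR/Type.h"
  using namespace llvm;

  bool fallsBack(const Instruction &Inst) {
    // Loads are now let through to GlobalISel even when the loaded type
    // is a scalable vector.
    if (Inst.getOpcode() == Instruction::Load)
      return false;
    // Any other instruction producing a scalable type still falls back.
    return Inst.getType()->isScalableTy();
  }
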
>From 34734cfed5633d960eed8b695f13e03d995b8827 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Mon, 29 Jan 2024 20:40:01 -0500
Subject: [PATCH 2/7] change the type of StoreSize to TypeSize

---
 llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 7c986dbbc2c7c8..ba510543d41002 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1361,9 +1361,8 @@ static bool isSwiftError(const Value *V) {
 
 bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
   const LoadInst &LI = cast<LoadInst>(U);
-
-  unsigned StoreSize = DL->getTypeStoreSize(LI.getType());
-  if (StoreSize == 0)
+  TypeSize StoreSize = DL->getTypeStoreSize(LI.getType());
+  if (StoreSize.isZero())
     return true;
 
   ArrayRef<Register> Regs = getOrCreateVRegs(LI);

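getTypeStoreSize returns a TypeSize, and for a scalable vector that value is only a known minimum plus a scalable flag; funneling it through `unsigned` would drop the flag (and, if I read TypeSize right, the implicit conversion to a plain integer asserts for scalable sizes). A standalone illustration of the semantics, not part of the patch:

  #include "llvm/Support/TypeSize.h"
  #include <cassert>
  using namespace llvm;

  int main() {
    TypeSize Fixed = TypeSize::getFixed(8);       // exactly 8 bytes
    TypeSize Scalable = TypeSize::getScalable(8); // vscale x 8 bytes
    assert(Scalable.isScalable() && !Fixed.isScalable());
    // isZero() is well defined for both flavors, which is why the check
    // above becomes StoreSize.isZero() instead of StoreSize == 0.
    assert(!Fixed.isZero() && !Scalable.isZero());
    return 0;
  }
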
>From 09404afb7ad685d0dc17878610e5fd5beef826dc Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Mon, 29 Jan 2024 20:43:42 -0500
Subject: [PATCH 3/7] The MMO can have a scalable vector type, so use
 TypeSize::isKnownGT to compare the load memory size with the result size

---
 llvm/lib/CodeGen/MachineVerifier.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index 2632b5b9feac9d..752d640336157d 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1191,7 +1191,7 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
         if (MMO.getSizeInBits() >= ValTy.getSizeInBits())
           report("Generic extload must have a narrower memory type", MI);
       } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
-        if (MMO.getSize() > ValTy.getSizeInBytes())
+        if (TypeSize::isKnownGT(MMO.getMemoryType().getSizeInBytes(), ValTy.getSizeInBytes()))
           report("load memory size cannot exceed result size", MI);
       } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
         if (ValTy.getSizeInBytes() < MMO.getSize())

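Fixed and scalable sizes are only partially ordered, so a plain `>` is not meaningful across the two flavors. TypeSize::isKnownGT returns true only when the left side is larger for every possible vscale, which keeps the verifier conservative. A small demo of the partial order (values chosen for illustration):

  #include "llvm/Support/TypeSize.h"
  #include <cassert>
  using namespace llvm;

  void partialOrderDemo() {
    TypeSize A = TypeSize::getScalable(16); // vscale x 16 bytes
    TypeSize B = TypeSize::getFixed(16);    // exactly 16 bytes
    // A equals B at vscale == 1 and exceeds it for vscale > 1, so neither
    // direction is *known*; the verifier stays quiet in such cases.
    assert(!TypeSize::isKnownGT(A, B) && !TypeSize::isKnownGT(B, A));
    // Within one flavor the comparison is exact:
    assert(TypeSize::isKnownGT(TypeSize::getScalable(32), A));
  }
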
>From 7d7cbf885625f9b0323b31df08d0ebba8cf8a516 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Tue, 30 Jan 2024 08:57:38 -0500
Subject: [PATCH 4/7] simple test case for scalable vector load

---
 llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ld.ll | 7 +++++++
 1 file changed, 7 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ld.ll

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ld.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ld.ll
new file mode 100644
index 00000000000000..5f98c6a7066c70
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ld.ll
@@ -0,0 +1,7 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -global-isel -stop-after=irtranslator -verify-machineinstrs < %s | FileCheck -check-prefixes=RV32I %s
+
+define void @vload_vint8m1(ptr %pa) {
+  %va = load <vscale x 8 x i8>, ptr %pa
+  ret void
+}

>From 7508cf9d9f6dc49a4aceda9e8358a79cba7d5a6a Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Tue, 30 Jan 2024 10:42:03 -0500
Subject: [PATCH 5/7] add test checks for this simple test case

---
 .../CodeGen/RISCV/GlobalISel/irtranslator/vec-ld.ll   | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ld.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ld.ll
index 5f98c6a7066c70..faf360a5a97db4 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ld.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ld.ll
@@ -2,6 +2,13 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -global-isel -stop-after=irtranslator -verify-machineinstrs < %s | FileCheck -check-prefixes=RV32I %s
 
 define void @vload_vint8m1(ptr %pa) {
-  %va = load <vscale x 8 x i8>, ptr %pa
-  ret void
+  ; RV32I-LABEL: name: vload_vint8m1
+  ; RV32I: bb.1 (%ir-block.0):
+  ; RV32I-NEXT:   liveins: $x10
+  ; RV32I-NEXT: {{  $}}
+  ; RV32I-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32I-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+  ; RV32I-NEXT:   PseudoRET
+  %va = load <vscale x 8 x i8>, ptr %pa, align 8
+  ret void
 }

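The CHECK lines show what the IRTranslator now produces: a single G_LOAD whose MachineMemOperand carries the scalable memory type. A rough sketch of the equivalent MachineIRBuilder calls; emitScalableLoad is a hypothetical helper for illustration, not code from this PR:

  #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
  #include "llvm/CodeGen/MachineFunction.h"
  #include "llvm/CodeGen/MachineMemOperand.h"
  using namespace llvm;

  static Register emitScalableLoad(MachineIRBuilder &MIB, MachineFunction &MF,
                                   Register Ptr, MachinePointerInfo PtrInfo) {
    // <vscale x 8 x s8>: a known minimum of 8 lanes of s8, scaled by vscale.
    LLT MemTy = LLT::scalable_vector(8, LLT::scalar(8));
    // The `align 8` from the IR load ends up on the memory operand.
    MachineMemOperand *MMO = MF.getMachineMemOperand(
        PtrInfo, MachineMemOperand::MOLoad, MemTy, Align(8));
    return MIB.buildLoad(MemTy, Ptr, *MMO).getReg(0);
  }
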
>From 9798ef69cf6384d4828d57ca785f59b70264635a Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Tue, 30 Jan 2024 10:42:49 -0500
Subject: [PATCH 6/7] replace getSize with the known minimum size in bytes to
 accommodate scalable vectors; add comprehensive tests for all vector types
 on both rv32 and rv64

---
 llvm/lib/CodeGen/MachineOperand.cpp           |   3 +-
 .../RISCV/GlobalISel/irtranslator/vec-ld.ll   | 498 +++++++++++++++++-
 2 files changed, 488 insertions(+), 13 deletions(-)

diff --git a/llvm/lib/CodeGen/MachineOperand.cpp b/llvm/lib/CodeGen/MachineOperand.cpp
index c7c0a1c20d57f4..24a93e83a1d36b 100644
--- a/llvm/lib/CodeGen/MachineOperand.cpp
+++ b/llvm/lib/CodeGen/MachineOperand.cpp
@@ -1240,7 +1240,8 @@ void MachineMemOperand::print(raw_ostream &OS, ModuleSlotTracker &MST,
        << "unknown-address";
   }
   MachineOperand::printOperandOffset(OS, getOffset());
-  if (getSize() > 0 && getAlign() != getSize())
+  unsigned MinSize = getType().getSizeInBytes().getKnownMinValue();
+  if (MinSize > 0 && getAlign() != MinSize)
     OS << ", align " << getAlign().value();
   if (getAlign() != getBaseAlign())
     OS << ", basealign " << getBaseAlign().value();
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ld.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ld.ll
index faf360a5a97db4..c90572d04e30c8 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ld.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ld.ll
@@ -1,14 +1,488 @@
 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -global-isel -stop-after=irtranslator -verify-machineinstrs < %s | FileCheck -check-prefixes=RV32I %s
-
-define void @vload_vint8m1(ptr %pa) {
-  ; RV32I-LABEL: name: vload_vint8m1
-  ; RV32I: bb.1 (%ir-block.0):
-  ; RV32I-NEXT:   liveins: $x10
-  ; RV32I-NEXT: {{  $}}
-  ; RV32I-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV32I-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
-  ; RV32I-NEXT:   PseudoRET
-  %va = load <vscale x 8 x i8>, ptr %pa, align 8
-  ret void
+; RUN: llc -mtriple=riscv32 -mattr=+v -global-isel -stop-after=irtranslator -verify-machineinstrs < %s | FileCheck -check-prefixes=RV32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -global-isel -stop-after=irtranslator -verify-machineinstrs < %s | FileCheck -check-prefixes=RV64 %s
+
+define <vscale x 1 x i8> @vload_nx1i8(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx1i8
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx1i8
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 1 x i8>, ptr %pa
+  ret <vscale x 1 x i8> %va
+}
+
+define <vscale x 2 x i8> @vload_nx2i8(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx2i8
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx2i8
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 2 x i8>, ptr %pa
+  ret <vscale x 2 x i8> %va
+}
+
+define <vscale x 4 x i8> @vload_nx4i8(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx4i8
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx4i8
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 4 x i8>, ptr %pa
+  ret <vscale x 4 x i8> %va
+}
+
+define <vscale x 8 x i8> @vload_nx8i8(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx8i8
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx8i8
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 8 x i8>, ptr %pa
+  ret <vscale x 8 x i8> %va
+}
+
+define <vscale x 16 x i8> @vload_nx16i8(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx16i8
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+  ; RV32-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: vload_nx16i8
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+  ; RV64-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+  %va = load <vscale x 16 x i8>, ptr %pa
+  ret <vscale x 16 x i8> %va
+}
+
+define <vscale x 32 x i8> @vload_nx32i8(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx32i8
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+  ; RV32-NEXT:   $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: vload_nx32i8
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+  ; RV64-NEXT:   $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+  %va = load <vscale x 32 x i8>, ptr %pa
+  ret <vscale x 32 x i8> %va
+}
+
+define <vscale x 64 x i8> @vload_nx64i8(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx64i8
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+  ; RV32-NEXT:   $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: vload_nx64i8
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+  ; RV64-NEXT:   $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+  %va = load <vscale x 64 x i8>, ptr %pa
+  ret <vscale x 64 x i8> %va
+}
+
+define <vscale x 1 x i16> @vload_nx1i16(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx1i16
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa, align 2)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx1i16
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa, align 2)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 1 x i16>, ptr %pa
+  ret <vscale x 1 x i16> %va
+}
+
+define <vscale x 2 x i16> @vload_nx2i16(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx2i16
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa, align 4)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx2i16
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa, align 4)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 2 x i16>, ptr %pa
+  ret <vscale x 2 x i16> %va
+}
+
+define <vscale x 4 x i16> @vload_nx4i16(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx4i16
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 8)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx4i16
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 8)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 4 x i16>, ptr %pa
+  ret <vscale x 4 x i16> %va
+}
+
+define <vscale x 8 x i16> @vload_nx8i16(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx8i16
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa, align 16)
+  ; RV32-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: vload_nx8i16
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa, align 16)
+  ; RV64-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+  %va = load <vscale x 8 x i16>, ptr %pa
+  ret <vscale x 8 x i16> %va
+}
+
+define <vscale x 16 x i16> @vload_nx16i16(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx16i16
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa, align 32)
+  ; RV32-NEXT:   $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: vload_nx16i16
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa, align 32)
+  ; RV64-NEXT:   $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+  %va = load <vscale x 16 x i16>, ptr %pa
+  ret <vscale x 16 x i16> %va
+}
+
+define <vscale x 32 x i16> @vload_nx32i16(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx32i16
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa, align 64)
+  ; RV32-NEXT:   $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: vload_nx32i16
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa, align 64)
+  ; RV64-NEXT:   $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+  %va = load <vscale x 32 x i16>, ptr %pa
+  ret <vscale x 32 x i16> %va
+}
+
+define <vscale x 1 x i32> @vload_nx1i32(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx1i32
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa, align 4)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx1i32
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa, align 4)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 1 x i32>, ptr %pa
+  ret <vscale x 1 x i32> %va
+}
+
+define <vscale x 2 x i32> @vload_nx2i32(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx2i32
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 8)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx2i32
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 8)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 2 x i32>, ptr %pa
+  ret <vscale x 2 x i32> %va
+}
+
+define <vscale x 4 x i32> @vload_nx4i32(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx4i32
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa, align 16)
+  ; RV32-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: vload_nx4i32
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa, align 16)
+  ; RV64-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+  %va = load <vscale x 4 x i32>, ptr %pa
+  ret <vscale x 4 x i32> %va
+}
+
+define <vscale x 8 x i32> @vload_nx8i32(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx8i32
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa, align 32)
+  ; RV32-NEXT:   $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: vload_nx8i32
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa, align 32)
+  ; RV64-NEXT:   $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+  %va = load <vscale x 8 x i32>, ptr %pa
+  ret <vscale x 8 x i32> %va
 }
+
+define <vscale x 16 x i32> @vload_nx16i32(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx16i32
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa, align 64)
+  ; RV32-NEXT:   $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: vload_nx16i32
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa, align 64)
+  ; RV64-NEXT:   $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+  %va = load <vscale x 16 x i32>, ptr %pa
+  ret <vscale x 16 x i32> %va
+}
+
+define <vscale x 1 x i64> @vload_nx1i64(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx1i64
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa, align 8)
+  ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx1i64
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa, align 8)
+  ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  %va = load <vscale x 1 x i64>, ptr %pa
+  ret <vscale x 1 x i64> %va
+}
+
+define <vscale x 2 x i64> @vload_nx2i64(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx2i64
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 16)
+  ; RV32-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: vload_nx2i64
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 16)
+  ; RV64-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+  %va = load <vscale x 2 x i64>, ptr %pa
+  ret <vscale x 2 x i64> %va
+}
+
+define <vscale x 4 x i64> @vload_nx4i64(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx4i64
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa, align 32)
+  ; RV32-NEXT:   $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: vload_nx4i64
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa, align 32)
+  ; RV64-NEXT:   $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+  %va = load <vscale x 4 x i64>, ptr %pa
+  ret <vscale x 4 x i64> %va
+}
+
+define <vscale x 8 x i64> @vload_nx8i64(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx8i64
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa, align 64)
+  ; RV32-NEXT:   $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: vload_nx8i64
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa, align 64)
+  ; RV64-NEXT:   $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+  %va = load <vscale x 8 x i64>, ptr %pa
+  ret <vscale x 8 x i64> %va
+}
+

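getSize() folds the memory size down to a single plain byte count, which a scalable MMO does not have; the printer now compares the alignment against the known minimum of the type's byte size instead. My paraphrase of the new rule as a standalone predicate (names are mine, not from the patch):

  #include "llvm/Support/Alignment.h"
  #include "llvm/Support/TypeSize.h"
  using namespace llvm;

  // Print ", align N" only when the alignment differs from the
  // known-minimum size in bytes, the only concrete number available for
  // a scalable memory type.
  static bool shouldPrintAlign(TypeSize SizeInBytes, Align A) {
    uint64_t MinSize = SizeInBytes.getKnownMinValue();
    return MinSize > 0 && A.value() != MinSize;
  }
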
>From f58885496ff458afa566b99f896937b879a22787 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Wed, 28 Feb 2024 12:10:17 -0500
Subject: [PATCH 7/7] use MMO.getType() in the verifier check

---
 llvm/lib/CodeGen/MachineVerifier.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index 752d640336157d..7daf66de47ca77 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1191,7 +1191,7 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
         if (MMO.getSizeInBits() >= ValTy.getSizeInBits())
           report("Generic extload must have a narrower memory type", MI);
       } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
-        if (TypeSize::isKnownGT(MMO.getMemoryType().getSizeInBytes(), ValTy.getSizeInBytes()))
+        if (TypeSize::isKnownGT(MMO.getType().getSizeInBytes(), ValTy.getSizeInBytes()))
           report("load memory size cannot exceed result size", MI);
       } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
         if (ValTy.getSizeInBytes() < MMO.getSize())

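With that, the G_LOAD rule in the verifier reads as below; loadMemSizeExceedsResult is a hypothetical standalone restatement, assuming MMO.getType() is the accessor for the memory LLT as used in the hunk above:

  #include "llvm/CodeGen/MachineMemOperand.h"
  using namespace llvm;

  // Report an error only when the memory size provably exceeds the
  // result size for every value of vscale.
  static bool loadMemSizeExceedsResult(const MachineMemOperand &MMO,
                                       LLT ValTy) {
    return TypeSize::isKnownGT(MMO.getType().getSizeInBytes(),
                               ValTy.getSizeInBytes());
  }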

