[llvm] [RISCV][GlobalISel] Legalize Scalable Vector Loads and Stores (PR #84965)
Jiahan Xie via llvm-commits
llvm-commits at lists.llvm.org
Mon Jul 22 05:47:49 PDT 2024
https://github.com/jiahanxie353 updated https://github.com/llvm/llvm-project/pull/84965
>From f5ad49a0078022c7f5bc4047858366b25c452d65 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Sun, 24 Mar 2024 20:11:14 -0400
Subject: [PATCH 1/8] use TypeSize for mem type since it can be a scalable vector
---
llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 640a425ffa735..8ac246c5868e6 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -3475,8 +3475,8 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerLoad(GAnyLoad &LoadMI) {
LLT MemTy = MMO.getMemoryType();
MachineFunction &MF = MIRBuilder.getMF();
- unsigned MemSizeInBits = MemTy.getSizeInBits();
- unsigned MemStoreSizeInBits = 8 * MemTy.getSizeInBytes();
+ TypeSize MemSizeInBits = MemTy.getSizeInBits();
+ TypeSize MemStoreSizeInBits = MemTy.getSizeInBytes().multiplyCoefficientBy(8);
if (MemSizeInBits != MemStoreSizeInBits) {
if (MemTy.isVector())
@@ -3540,7 +3540,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerLoad(GAnyLoad &LoadMI) {
if (!isPowerOf2_32(MemSizeInBits)) {
// This load needs splitting into power of 2 sized loads.
- LargeSplitSize = llvm::bit_floor(MemSizeInBits);
+ LargeSplitSize = llvm::bit_floor(MemSizeInBits.getKnownMinValue());
SmallSplitSize = MemSizeInBits - LargeSplitSize;
} else {
// This is already a power of 2, but we still need to split this in half.
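
For reference, TypeSize carries an explicit scalable flag, so the size of a scalable vector is recorded as a known minimum that is multiplied by the runtime vscale; plain `unsigned` arithmetic would silently drop that. A minimal standalone sketch of the arithmetic this hunk relies on (not part of the patch; the header path may differ between LLVM versions):

// Sketch only: TypeSize arithmetic for a scalable LLT such as nxv1s8.
#include "llvm/CodeGenTypes/LowLevelType.h"
#include "llvm/Support/TypeSize.h"
#include <cassert>
using namespace llvm;

int main() {
  LLT NxV1S8 = LLT::scalable_vector(/*MinNumElements=*/1, /*ScalarSizeInBits=*/8);

  // getSizeInBits() is a TypeSize: "vscale x 8" bits, not a plain unsigned.
  TypeSize Bits = NxV1S8.getSizeInBits();
  assert(Bits.isScalable() && Bits.getKnownMinValue() == 8);

  // Scaling goes through a TypeSize helper instead of plain integer math,
  // which is why the patch uses multiplyCoefficientBy(8) rather than 8 *.
  TypeSize StoreBits = NxV1S8.getSizeInBytes().multiplyCoefficientBy(8);
  assert(StoreBits == Bits);
  return 0;
}
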
>From e77bae61e820063be5cb3cb29516745f854f0351 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Mon, 25 Mar 2024 12:16:36 -0400
Subject: [PATCH 2/8] legalize vload and vstore nxv1s8
---
llvm/lib/CodeGen/MIRParser/MIParser.cpp | 2 +-
.../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 6 +++-
.../legalizer/rvv/legalize-load.mir | 29 ++++++++++++++++++
.../legalizer/rvv/legalize-store.mir | 30 +++++++++++++++++++
4 files changed, 65 insertions(+), 2 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
index 1d16729aa3387..bf10794a100eb 100644
--- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -3388,7 +3388,7 @@ bool MIParser::parseMachineMemoryOperand(MachineMemOperand *&Dest) {
if (expectAndConsume(MIToken::rparen))
return true;
- Size = MemoryType.getSizeInBytes();
+ Size = MemoryType.getSizeInBytes().getKnownMinValue();
}
MachinePointerInfo Ptr = MachinePointerInfo();
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index f033ea7250030..5fc22ce1d1e82 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -265,7 +265,11 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
.legalForTypesWithMemDesc({{s32, p0, s8, 8},
{s32, p0, s16, 16},
{s32, p0, s32, 32},
- {p0, p0, sXLen, XLen}});
+ {p0, p0, sXLen, XLen},
+ {nxv1s8, p0, nxv1s8, 8}})
+ .widenScalarToNextPow2(0, /* MinSize = */ 8)
+ .lowerIfMemSizeNotByteSizePow2();
+
auto &ExtLoadActions =
getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
.legalForTypesWithMemDesc({{s32, p0, s8, 8}, {s32, p0, s16, 16}});
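
For context, each initializer passed to legalForTypesWithMemDesc is a {result type, pointer type, memory type, minimum alignment in bits} tuple, so the new {nxv1s8, p0, nxv1s8, 8} entry marks a byte-aligned load/store of <vscale x 1 x s8> as legal. A small sketch of how those LLT shorthands are built (the names mirror locals already defined earlier in RISCVLegalizerInfo.cpp; the standalone function is illustrative only):

// Sketch only: the LLT shorthands referenced by the mem-desc table above.
#include "llvm/CodeGenTypes/LowLevelType.h"
using namespace llvm;

static void exampleTypes(unsigned XLen) {
  const LLT s32 = LLT::scalar(32);               // scalar result type
  const LLT p0 = LLT::pointer(0, XLen);          // address-space-0 pointer
  const LLT nxv1s8 = LLT::scalable_vector(1, 8); // <vscale x 1 x s8>
  // {nxv1s8, p0, nxv1s8, 8} then reads: a G_LOAD/G_STORE whose value type and
  // memory type are both nxv1s8, addressed through p0, is legal when the
  // access is at least 8-bit (one byte) aligned.
  (void)s32; (void)p0; (void)nxv1s8;
}
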
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
new file mode 100644
index 0000000000000..1b62f55520716
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
@@ -0,0 +1,29 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+--- |
+
+ define <vscale x 1 x i8> @vload_nx1i8(ptr %pa) {
+ %va = load <vscale x 1 x i8>, ptr %pa
+ ret <vscale x 1 x i8> %va
+ }
+
+...
+---
+name: vload_nx1i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nx1i8
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
new file mode 100644
index 0000000000000..b947861f7be50
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
@@ -0,0 +1,30 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+--- |
+
+ define void @vstore_nx1i8(ptr %pa, <vscale x 1 x i8> %b) {
+ store <vscale x 1 x i8> %b, ptr %pa, align 1
+ ret void
+ }
+
+...
+---
+name: vstore_nx1i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx1i8
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s8>) = COPY $v8
+ G_STORE %1(<vscale x 1 x s8>), %0(p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
+ PseudoRET
+
+...
>From def0a9f897913b84a1cc83c2e1eb135fa36ee191 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Wed, 27 Mar 2024 14:32:04 -0400
Subject: [PATCH 3/8] add more tests for i8 element type
---
.../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 6 +-
.../legalizer/rvv/legalize-store.mir | 70 ++++++++++++++++++-
2 files changed, 74 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 5fc22ce1d1e82..82040b7e91d4a 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -266,7 +266,11 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
{s32, p0, s16, 16},
{s32, p0, s32, 32},
{p0, p0, sXLen, XLen},
- {nxv1s8, p0, nxv1s8, 8}})
+ {nxv1s8, p0, nxv1s8, 8},
+ {nxv2s8, p0, nxv2s8, 8},
+ {nxv4s8, p0, nxv4s8, 8},
+ {nxv8s8, p0, nxv8s8, 8},
+ {nxv16s8, p0, nxv16s8, 8}})
.widenScalarToNextPow2(0, /* MinSize = */ 8)
.lowerIfMemSizeNotByteSizePow2();
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
index b947861f7be50..66df14b6635d3 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
@@ -7,7 +7,27 @@
store <vscale x 1 x i8> %b, ptr %pa, align 1
ret void
}
-
+
+ define <vscale x 2 x i8> @vload_nx2i8(ptr %pa) {
+ %va = load <vscale x 2 x i8>, ptr %pa, align 2
+ ret <vscale x 2 x i8> %va
+ }
+
+ define <vscale x 4 x i8> @vload_nx4i8(ptr %pa) {
+ %va = load <vscale x 4 x i8>, ptr %pa, align 4
+ ret <vscale x 4 x i8> %va
+ }
+
+ define <vscale x 8 x i8> @vload_nx8i8(ptr %pa) {
+ %va = load <vscale x 8 x i8>, ptr %pa, align 8
+ ret <vscale x 8 x i8> %va
+ }
+
+ define <vscale x 16 x i8> @vload_nx16i8(ptr %pa) {
+ %va = load <vscale x 16 x i8>, ptr %pa, align 16
+ ret <vscale x 16 x i8> %va
+ }
+
...
---
name: vstore_nx1i8
@@ -28,3 +48,51 @@ body: |
PseudoRET
...
+---
+name: vload_nx2i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx4i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx8i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx16i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ $v8m2 = COPY %1(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
>From e26805244337d51c56c5853e3ebccfd4e09f1910 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Tue, 16 Apr 2024 16:26:26 -0400
Subject: [PATCH 4/8] legalize all regularly supported types from nxv1s8 to
 nxv8s64 without alignment info
---
.../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 28 ++++++++++++++++---
1 file changed, 24 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 82040b7e91d4a..7d1d8891a0c60 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -266,13 +266,21 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
{s32, p0, s16, 16},
{s32, p0, s32, 32},
{p0, p0, sXLen, XLen},
- {nxv1s8, p0, nxv1s8, 8},
{nxv2s8, p0, nxv2s8, 8},
{nxv4s8, p0, nxv4s8, 8},
{nxv8s8, p0, nxv8s8, 8},
- {nxv16s8, p0, nxv16s8, 8}})
- .widenScalarToNextPow2(0, /* MinSize = */ 8)
- .lowerIfMemSizeNotByteSizePow2();
+ {nxv16s8, p0, nxv16s8, 8},
+ {nxv32s8, p0, nxv32s8, 8},
+ {nxv64s8, p0, nxv64s8, 8},
+ {nxv2s16, p0, nxv2s16, 16},
+ {nxv4s16, p0, nxv4s16, 16},
+ {nxv8s16, p0, nxv8s16, 16},
+ {nxv16s16, p0, nxv16s16, 16},
+ {nxv32s16, p0, nxv32s16, 16},
+ {nxv2s32, p0, nxv2s32, 32},
+ {nxv4s32, p0, nxv4s32, 32},
+ {nxv8s32, p0, nxv8s32, 32},
+ {nxv16s32, p0, nxv16s32, 32}});
auto &ExtLoadActions =
getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
@@ -287,6 +295,18 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
} else if (ST.hasStdExtD()) {
LoadStoreActions.legalForTypesWithMemDesc({{s64, p0, s64, 64}});
}
+ if (ST.getELen() == 64)
+ LoadStoreActions.legalForTypesWithMemDesc({{nxv1s8, p0, nxv1s8, 8},
+ {nxv1s16, p0, nxv1s16, 16},
+ {nxv1s32, p0, nxv1s32, 32}});
+ if (ST.hasVInstructionsI64())
+ LoadStoreActions.legalForTypesWithMemDesc({{nxv1s64, p0, nxv1s64, 64},
+ {nxv2s64, p0, nxv2s64, 64},
+ {nxv4s64, p0, nxv4s64, 64},
+ {nxv8s64, p0, nxv8s64, 64}});
+ LoadStoreActions.widenScalarToNextPow2(0, /* MinSize = */ 8)
+ .lowerIfMemSizeNotByteSizePow2();
+
LoadStoreActions.clampScalar(0, s32, sXLen).lower();
ExtLoadActions.widenScalarToNextPow2(0).clampScalar(0, s32, sXLen).lower();
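
The split between the base table and the two guarded blocks mirrors which RVV configurations can hold each type: 64-bit-element vectors require hasVInstructionsI64(), and the single-element nxv1 forms of the 8/16/32-bit element types are only added when ELEN is 64. An illustrative helper (hypothetical, not in the patch) summarizing that gating, assuming the V extension is already known to be present:

// Hypothetical summary of the legality gating encoded by the rules above,
// assuming ST.hasVInstructions() already holds. The real table also caps the
// total register size at LMUL 8, which is omitted here for brevity.
static bool isRVVLoadStoreTypeLegal(unsigned MinNumElts, unsigned EltSizeBits,
                                    unsigned ELen, bool HasVInstructionsI64) {
  if (EltSizeBits == 64)
    return HasVInstructionsI64;   // nxv1s64 .. nxv8s64
  if (MinNumElts == 1)
    return ELen == 64;            // nxv1s8, nxv1s16, nxv1s32
  return true;                    // the unconditional entries in the base table
}
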
>From b779552c80c6627cffdd3953f272f07ba720163c Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Tue, 11 Jun 2024 11:18:07 -0400
Subject: [PATCH 5/8] legalize RVV loads for all LLTs and update all
 corresponding tests
---
.../CodeGen/GlobalISel/LegalityPredicates.cpp | 3 +-
.../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 58 +-
.../Target/RISCV/GISel/RISCVLegalizerInfo.h | 1 +
.../legalizer/rvv/legalize-load.mir | 1022 ++++++++++++++++-
4 files changed, 1078 insertions(+), 6 deletions(-)
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp b/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp
index 2c77ed8b06008..8fe48195c610b 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp
@@ -194,7 +194,8 @@ LegalityPredicate LegalityPredicates::memSizeNotByteSizePow2(unsigned MMOIdx) {
return [=](const LegalityQuery &Query) {
const LLT MemTy = Query.MMODescrs[MMOIdx].MemoryTy;
return !MemTy.isByteSized() ||
- !llvm::has_single_bit<uint32_t>(MemTy.getSizeInBytes());
+ !llvm::has_single_bit<uint32_t>(
+ MemTy.getSizeInBytes().getKnownMinValue());
};
}
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 7d1d8891a0c60..e15e12e203ef0 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -304,8 +304,10 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
{nxv2s64, p0, nxv2s64, 64},
{nxv4s64, p0, nxv4s64, 64},
{nxv8s64, p0, nxv8s64, 64}});
+
LoadStoreActions.widenScalarToNextPow2(0, /* MinSize = */ 8)
- .lowerIfMemSizeNotByteSizePow2();
+ .lowerIfMemSizeNotByteSizePow2()
+ .custom();
LoadStoreActions.clampScalar(0, s32, sXLen).lower();
ExtLoadActions.widenScalarToNextPow2(0).clampScalar(0, s32, sXLen).lower();
@@ -679,6 +681,57 @@ bool RISCVLegalizerInfo::legalizeExt(MachineInstr &MI,
return true;
}
+bool RISCVLegalizerInfo::legalizeLoadStore(MachineInstr &MI,
+ MachineIRBuilder &MIB) const {
+ MachineRegisterInfo &MRI = *MIB.getMRI();
+ MachineFunction *MF = MI.getParent()->getParent();
+ const DataLayout &DL = MIB.getDataLayout();
+ LLVMContext &Ctx = MF->getFunction().getContext();
+
+ Register DstReg = MI.getOperand(0).getReg();
+ Register PtrReg = MI.getOperand(1).getReg();
+ LLT LoadTy = MRI.getType(DstReg);
+ assert(LoadTy.isVector() && "Expect vector load.");
+ assert(STI.hasVInstructions() &&
+ (LoadTy.getScalarSizeInBits() != 64 || STI.hasVInstructionsI64()) &&
+ (LoadTy.getElementCount().getKnownMinValue() != 1 ||
+ STI.getELen() == 64) &&
+ "Load type must be legal integer or floating point vector.");
+
+ assert(MI.hasOneMemOperand() &&
+ "Load instructions only have one MemOperand.");
+ Align Alignment = (*MI.memoperands_begin())->getAlign();
+ MachineMemOperand *LoadMMO = MF->getMachineMemOperand(
+ MachinePointerInfo(), MachineMemOperand::MOLoad, LoadTy, Alignment);
+
+ const auto *TLI = STI.getTargetLowering();
+ EVT VT = EVT::getEVT(getTypeForLLT(LoadTy, Ctx));
+
+ if (TLI->allowsMemoryAccessForAlignment(Ctx, DL, VT, *LoadMMO))
+ return true;
+
+ unsigned EltSizeBits = LoadTy.getScalarSizeInBits();
+ assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
+ "Unexpected unaligned RVV load type");
+
+ // Calculate the new vector type with i8 elements
+ unsigned NumElements =
+ LoadTy.getElementCount().getKnownMinValue() * (EltSizeBits / 8);
+ LLT NewLoadTy = LLT::scalable_vector(NumElements, 8);
+
+ MachinePointerInfo PI = cast<GLoad>(MI).getMMO().getPointerInfo();
+ MachineMemOperand *NewLoadMMO = MF->getMachineMemOperand(
+ PI, MachineMemOperand::MOLoad, NewLoadTy, Alignment);
+
+ auto NewLoad = MIB.buildLoad(NewLoadTy, PtrReg, *NewLoadMMO);
+
+ MIB.buildBitcast(DstReg, NewLoad);
+
+ MI.eraseFromParent();
+
+ return true;
+}
+
/// Return the type of the mask type suitable for masking the provided
/// vector type. This is simply an i1 element type vector of the same
/// (possibly scalable) length.
@@ -856,6 +909,9 @@ bool RISCVLegalizerInfo::legalizeCustom(
return legalizeExt(MI, MIRBuilder);
case TargetOpcode::G_SPLAT_VECTOR:
return legalizeSplatVector(MI, MIRBuilder);
+ case TargetOpcode::G_LOAD:
+ case TargetOpcode::G_STORE:
+ return legalizeLoadStore(MI, MIRBuilder);
}
llvm_unreachable("expected switch to return");
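
The custom action only fires when allowsMemoryAccessForAlignment rejects the original access; the load is then re-expressed over an i8-element scalable vector of the same known-minimum size and the result is bitcast back. For example, an align-4 load of <vscale x 2 x s64> (exercised by vload_nxv2i64_align4 in the test below) computes NumElements = 2 * (64 / 8) = 16 and becomes a <vscale x 16 x s8> G_LOAD followed by a G_BITCAST. A standalone sketch of just that type computation (the helper name is illustrative, not from the patch):

// Sketch only: the byte-element vector type used for the narrowed access.
#include "llvm/CodeGenTypes/LowLevelType.h"
#include <cassert>
using namespace llvm;

static LLT getByteElementVectorTy(LLT DataTy) {
  unsigned EltSizeBits = DataTy.getScalarSizeInBits();
  assert(EltSizeBits % 8 == 0 && "expected byte-sized elements");
  unsigned NumElements =
      DataTy.getElementCount().getKnownMinValue() * (EltSizeBits / 8);
  return LLT::scalable_vector(NumElements, 8);
}
// e.g. getByteElementVectorTy(nxv2s64) == nxv16s8,
//      getByteElementVectorTy(nxv4s16) == nxv8s8.
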
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
index 5bb1e7a728278..0415d042c913b 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
@@ -45,6 +45,7 @@ class RISCVLegalizerInfo : public LegalizerInfo {
bool legalizeVScale(MachineInstr &MI, MachineIRBuilder &MIB) const;
bool legalizeExt(MachineInstr &MI, MachineIRBuilder &MIRBuilder) const;
bool legalizeSplatVector(MachineInstr &MI, MachineIRBuilder &MIB) const;
+ bool legalizeLoadStore(MachineInstr &MI, MachineIRBuilder &MIB) const;
};
} // end namespace llvm
#endif
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
index 1b62f55520716..12f218863e400 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
@@ -3,19 +3,231 @@
# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
--- |
- define <vscale x 1 x i8> @vload_nx1i8(ptr %pa) {
- %va = load <vscale x 1 x i8>, ptr %pa
+ define <vscale x 1 x i8> @vload_nxv1i8(ptr %pa) #0 {
+ %va = load <vscale x 1 x i8>, ptr %pa, align 1
ret <vscale x 1 x i8> %va
}
+ define <vscale x 2 x i8> @vload_nxv2i8(ptr %pa) #0 {
+ %va = load <vscale x 2 x i8>, ptr %pa, align 2
+ ret <vscale x 2 x i8> %va
+ }
+
+ define <vscale x 4 x i8> @vload_nxv4i8(ptr %pa) #0 {
+ %va = load <vscale x 4 x i8>, ptr %pa, align 4
+ ret <vscale x 4 x i8> %va
+ }
+
+ define <vscale x 8 x i8> @vload_nxv8i8(ptr %pa) #0 {
+ %va = load <vscale x 8 x i8>, ptr %pa, align 8
+ ret <vscale x 8 x i8> %va
+ }
+
+ define <vscale x 16 x i8> @vload_nxv16i8(ptr %pa) #0 {
+ %va = load <vscale x 16 x i8>, ptr %pa, align 16
+ ret <vscale x 16 x i8> %va
+ }
+
+ define <vscale x 32 x i8> @vload_nxv32i8(ptr %pa) #0 {
+ %va = load <vscale x 32 x i8>, ptr %pa, align 32
+ ret <vscale x 32 x i8> %va
+ }
+
+ define <vscale x 64 x i8> @vload_nxv64i8(ptr %pa) #0 {
+ %va = load <vscale x 64 x i8>, ptr %pa, align 64
+ ret <vscale x 64 x i8> %va
+ }
+
+ define <vscale x 1 x i16> @vload_nxv1i16(ptr %pa) #0 {
+ %va = load <vscale x 1 x i16>, ptr %pa, align 2
+ ret <vscale x 1 x i16> %va
+ }
+
+ define <vscale x 2 x i16> @vload_nxv2i16(ptr %pa) #0 {
+ %va = load <vscale x 2 x i16>, ptr %pa, align 4
+ ret <vscale x 2 x i16> %va
+ }
+
+ define <vscale x 4 x i16> @vload_nxv4i16(ptr %pa) #0 {
+ %va = load <vscale x 4 x i16>, ptr %pa, align 8
+ ret <vscale x 4 x i16> %va
+ }
+
+ define <vscale x 8 x i16> @vload_nxv8i16(ptr %pa) #0 {
+ %va = load <vscale x 8 x i16>, ptr %pa, align 16
+ ret <vscale x 8 x i16> %va
+ }
+
+ define <vscale x 16 x i16> @vload_nxv16i16(ptr %pa) #0 {
+ %va = load <vscale x 16 x i16>, ptr %pa, align 32
+ ret <vscale x 16 x i16> %va
+ }
+
+ define <vscale x 32 x i16> @vload_nxv32i16(ptr %pa) #0 {
+ %va = load <vscale x 32 x i16>, ptr %pa, align 64
+ ret <vscale x 32 x i16> %va
+ }
+
+ define <vscale x 1 x i32> @vload_nxv1i32(ptr %pa) #0 {
+ %va = load <vscale x 1 x i32>, ptr %pa, align 4
+ ret <vscale x 1 x i32> %va
+ }
+
+ define <vscale x 2 x i32> @vload_nxv2i32(ptr %pa) #0 {
+ %va = load <vscale x 2 x i32>, ptr %pa, align 8
+ ret <vscale x 2 x i32> %va
+ }
+
+ define <vscale x 4 x i32> @vload_nxv4i32(ptr %pa) #0 {
+ %va = load <vscale x 4 x i32>, ptr %pa, align 16
+ ret <vscale x 4 x i32> %va
+ }
+
+ define <vscale x 8 x i32> @vload_nxv8i32(ptr %pa) #0 {
+ %va = load <vscale x 8 x i32>, ptr %pa, align 32
+ ret <vscale x 8 x i32> %va
+ }
+
+ define <vscale x 16 x i32> @vload_nxv16i32(ptr %pa) #0 {
+ %va = load <vscale x 16 x i32>, ptr %pa, align 64
+ ret <vscale x 16 x i32> %va
+ }
+
+ define <vscale x 1 x i64> @vload_nxv1i64(ptr %pa) #0 {
+ %va = load <vscale x 1 x i64>, ptr %pa, align 8
+ ret <vscale x 1 x i64> %va
+ }
+
+ define <vscale x 2 x i64> @vload_nxv2i64(ptr %pa) #0 {
+ %va = load <vscale x 2 x i64>, ptr %pa, align 16
+ ret <vscale x 2 x i64> %va
+ }
+
+ define <vscale x 4 x i64> @vload_nxv4i64(ptr %pa) #0 {
+ %va = load <vscale x 4 x i64>, ptr %pa, align 32
+ ret <vscale x 4 x i64> %va
+ }
+
+ define <vscale x 8 x i64> @vload_nxv8i64(ptr %pa) #0 {
+ %va = load <vscale x 8 x i64>, ptr %pa, align 64
+ ret <vscale x 8 x i64> %va
+ }
+
+ define <vscale x 16 x i8> @vload_nxv16i8_align1(ptr %pa) #0 {
+ %va = load <vscale x 16 x i8>, ptr %pa, align 1
+ ret <vscale x 16 x i8> %va
+ }
+
+ define <vscale x 16 x i8> @vload_nxv16i8_align2(ptr %pa) #0 {
+ %va = load <vscale x 16 x i8>, ptr %pa, align 2
+ ret <vscale x 16 x i8> %va
+ }
+
+ define <vscale x 16 x i8> @vload_nxv16i8_align16(ptr %pa) #0 {
+ %va = load <vscale x 16 x i8>, ptr %pa, align 16
+ ret <vscale x 16 x i8> %va
+ }
+
+ define <vscale x 16 x i8> @vload_nxv16i8_align64(ptr %pa) #0 {
+ %va = load <vscale x 16 x i8>, ptr %pa, align 64
+ ret <vscale x 16 x i8> %va
+ }
+
+ define <vscale x 4 x i16> @vload_nxv4i16_align1(ptr %pa) #0 {
+ %va = load <vscale x 4 x i16>, ptr %pa, align 1
+ ret <vscale x 4 x i16> %va
+ }
+
+ define <vscale x 4 x i16> @vload_nxv4i16_align2(ptr %pa) #0 {
+ %va = load <vscale x 4 x i16>, ptr %pa, align 2
+ ret <vscale x 4 x i16> %va
+ }
+
+ define <vscale x 4 x i16> @vload_nxv4i16_align4(ptr %pa) #0 {
+ %va = load <vscale x 4 x i16>, ptr %pa, align 4
+ ret <vscale x 4 x i16> %va
+ }
+
+ define <vscale x 4 x i16> @vload_nxv4i16_align8(ptr %pa) #0 {
+ %va = load <vscale x 4 x i16>, ptr %pa, align 8
+ ret <vscale x 4 x i16> %va
+ }
+
+ define <vscale x 4 x i16> @vload_nxv4i16_align16(ptr %pa) #0 {
+ %va = load <vscale x 4 x i16>, ptr %pa, align 16
+ ret <vscale x 4 x i16> %va
+ }
+
+ define <vscale x 2 x i32> @vload_nxv2i32_align2(ptr %pa) #0 {
+ %va = load <vscale x 2 x i32>, ptr %pa, align 2
+ ret <vscale x 2 x i32> %va
+ }
+
+ define <vscale x 2 x i32> @vload_nxv2i32_align4(ptr %pa) #0 {
+ %va = load <vscale x 2 x i32>, ptr %pa, align 4
+ ret <vscale x 2 x i32> %va
+ }
+
+ define <vscale x 2 x i32> @vload_nxv2i32_align8(ptr %pa) #0 {
+ %va = load <vscale x 2 x i32>, ptr %pa, align 8
+ ret <vscale x 2 x i32> %va
+ }
+
+ define <vscale x 2 x i32> @vload_nxv2i32_align16(ptr %pa) #0 {
+ %va = load <vscale x 2 x i32>, ptr %pa, align 16
+ ret <vscale x 2 x i32> %va
+ }
+
+ define <vscale x 2 x i32> @vload_nxv2i32_align256(ptr %pa) #0 {
+ %va = load <vscale x 2 x i32>, ptr %pa, align 256
+ ret <vscale x 2 x i32> %va
+ }
+
+ define <vscale x 2 x i64> @vload_nxv2i64_align4(ptr %pa) #0 {
+ %va = load <vscale x 2 x i64>, ptr %pa, align 4
+ ret <vscale x 2 x i64> %va
+ }
+
+ define <vscale x 2 x i64> @vload_nxv2i64_align8(ptr %pa) #0 {
+ %va = load <vscale x 2 x i64>, ptr %pa, align 8
+ ret <vscale x 2 x i64> %va
+ }
+
+ define <vscale x 2 x i64> @vload_nxv2i64_align16(ptr %pa) #0 {
+ %va = load <vscale x 2 x i64>, ptr %pa, align 16
+ ret <vscale x 2 x i64> %va
+ }
+
+ define <vscale x 2 x i64> @vload_nxv2i64_align32(ptr %pa) #0 {
+ %va = load <vscale x 2 x i64>, ptr %pa, align 32
+ ret <vscale x 2 x i64> %va
+ }
+
+ define <vscale x 1 x ptr> @vload_nxv1ptr(ptr %pa) #0 {
+ %va = load <vscale x 1 x ptr>, ptr %pa, align 4
+ ret <vscale x 1 x ptr> %va
+ }
+
+ define <vscale x 2 x ptr> @vload_nxv2ptr(ptr %pa) #0 {
+ %va = load <vscale x 2 x ptr>, ptr %pa, align 8
+ ret <vscale x 2 x ptr> %va
+ }
+
+ define <vscale x 8 x ptr> @vload_nxv8ptr(ptr %pa) #0 {
+ %va = load <vscale x 8 x ptr>, ptr %pa, align 32
+ ret <vscale x 8 x ptr> %va
+ }
+
+ attributes #0 = { "target-features"="+v" }
+
...
---
-name: vload_nx1i8
+name: vload_nxv1i8
body: |
bb.1 (%ir-block.0):
liveins: $x10
- ; CHECK-LABEL: name: vload_nx1i8
+ ; CHECK-LABEL: name: vload_nxv1i8
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
@@ -27,3 +239,805 @@ body: |
$v8 = COPY %1(<vscale x 1 x s8>)
PseudoRET implicit $v8
+...
+---
+name: vload_nxv2i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2i8
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv4i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv4i8
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv8i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv8i8
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv16i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv16i8
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ $v8m2 = COPY %1(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nxv32i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv32i8
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+ ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 32 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+ $v8m4 = COPY %1(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: vload_nxv64i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv64i8
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+ ; CHECK-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 64 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+ $v8m8 = COPY %1(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: vload_nxv1i16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv1i16
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv2i16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2i16
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv4i16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv4i16
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv8i16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv8i16
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+ ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+ $v8m2 = COPY %1(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nxv16i16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv16i16
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+ ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+ $v8m4 = COPY %1(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: vload_nxv32i16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv32i16
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+ ; CHECK-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 32 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+ $v8m8 = COPY %1(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: vload_nxv1i32
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv1i32
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv2i32
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2i32
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv4i32
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv4i32
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+ ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+ $v8m2 = COPY %1(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nxv8i32
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv8i32
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+ ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+ $v8m4 = COPY %1(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: vload_nxv16i32
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv16i32
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+ ; CHECK-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+ $v8m8 = COPY %1(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: vload_nxv1i64
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv1i64
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv2i64
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2i64
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nxv4i64
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv4i64
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+ ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+ $v8m4 = COPY %1(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: vload_nxv8i64
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv8i64
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+ ; CHECK-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+ $v8m8 = COPY %1(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: vload_nxv16i8_align1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv16i8_align1
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+ ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+ $v8m2 = COPY %1(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nxv16i8_align2
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv16i8_align2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+ ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+ $v8m2 = COPY %1(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nxv16i8_align16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv16i8_align16
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ $v8m2 = COPY %1(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nxv16i8_align64
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv16i8_align64
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+ ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+ $v8m2 = COPY %1(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nxv4i16_align1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv4i16_align1
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 4 x s16>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 1)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv4i16_align2
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv4i16_align2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv4i16_align4
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv4i16_align4
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv4i16_align8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv4i16_align8
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv4i16_align16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv4i16_align16
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv2i32_align2
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2i32_align2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s32>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 2)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv2i32_align4
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2i32_align4
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv2i32_align8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2i32_align8
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv2i32_align16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2i32_align16
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv2i32_align256
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2i32_align256
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv2i64_align4
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2i64_align4
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s64>) = G_BITCAST [[LOAD]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 4)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nxv2i64_align8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2i64_align8
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+ ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nxv2i64_align16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2i64_align16
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nxv2i64_align32
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2i64_align32
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+ ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nxv1ptr
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv1ptr
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 1 x p0>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv2ptr
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2ptr
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 2 x p0>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv8ptr
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv8ptr
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+ ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+ $v8m4 = COPY %1(<vscale x 8 x p0>)
+ PseudoRET implicit $v8m4
+
+...
>From 80ed1b954a59d4968af0a30a749541939a37ff22 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Tue, 25 Jun 2024 13:42:30 -0400
Subject: [PATCH 6/8] legalize store
---
.../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 41 +-
.../legalizer/rvv/legalize-store.mir | 1029 ++++++++++++++++-
2 files changed, 1010 insertions(+), 60 deletions(-)
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index e15e12e203ef0..d9543bda4c304 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -19,6 +19,7 @@
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
@@ -690,42 +691,46 @@ bool RISCVLegalizerInfo::legalizeLoadStore(MachineInstr &MI,
Register DstReg = MI.getOperand(0).getReg();
Register PtrReg = MI.getOperand(1).getReg();
- LLT LoadTy = MRI.getType(DstReg);
- assert(LoadTy.isVector() && "Expect vector load.");
+ LLT DataTy = MRI.getType(DstReg);
+ assert(DataTy.isVector() && "Expect vector load.");
assert(STI.hasVInstructions() &&
- (LoadTy.getScalarSizeInBits() != 64 || STI.hasVInstructionsI64()) &&
- (LoadTy.getElementCount().getKnownMinValue() != 1 ||
+ (DataTy.getScalarSizeInBits() != 64 || STI.hasVInstructionsI64()) &&
+ (DataTy.getElementCount().getKnownMinValue() != 1 ||
STI.getELen() == 64) &&
"Load type must be legal integer or floating point vector.");
assert(MI.hasOneMemOperand() &&
"Load instructions only have one MemOperand.");
- Align Alignment = (*MI.memoperands_begin())->getAlign();
- MachineMemOperand *LoadMMO = MF->getMachineMemOperand(
- MachinePointerInfo(), MachineMemOperand::MOLoad, LoadTy, Alignment);
+ MachineMemOperand *MMO = *MI.memoperands_begin();
+ Align Alignment = MMO->getAlign();
const auto *TLI = STI.getTargetLowering();
- EVT VT = EVT::getEVT(getTypeForLLT(LoadTy, Ctx));
+ EVT VT = EVT::getEVT(getTypeForLLT(DataTy, Ctx));
- if (TLI->allowsMemoryAccessForAlignment(Ctx, DL, VT, *LoadMMO))
+ if (TLI->allowsMemoryAccessForAlignment(Ctx, DL, VT, *MMO))
return true;
- unsigned EltSizeBits = LoadTy.getScalarSizeInBits();
+ unsigned EltSizeBits = DataTy.getScalarSizeInBits();
assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
"Unexpected unaligned RVV load type");
// Calculate the new vector type with i8 elements
unsigned NumElements =
- LoadTy.getElementCount().getKnownMinValue() * (EltSizeBits / 8);
- LLT NewLoadTy = LLT::scalable_vector(NumElements, 8);
+ DataTy.getElementCount().getKnownMinValue() * (EltSizeBits / 8);
+ LLT NewDataTy = LLT::scalable_vector(NumElements, 8);
- MachinePointerInfo PI = cast<GLoad>(MI).getMMO().getPointerInfo();
- MachineMemOperand *NewLoadMMO = MF->getMachineMemOperand(
- PI, MachineMemOperand::MOLoad, NewLoadTy, Alignment);
+ MachinePointerInfo PI = MMO->getPointerInfo();
+ MachineMemOperand *NewMMO =
+ MF->getMachineMemOperand(PI, MMO->getFlags(), NewDataTy, Alignment);
- auto NewLoad = MIB.buildLoad(NewLoadTy, PtrReg, *NewLoadMMO);
-
- MIB.buildBitcast(DstReg, NewLoad);
+ if (isa<GLoad>(MI)) {
+ auto NewLoad = MIB.buildLoad(NewDataTy, PtrReg, *NewMMO);
+ MIB.buildBitcast(DstReg, NewLoad);
+ } else {
+ assert(isa<GStore>(MI) && "Machine instructions must be Load/Store.");
+ auto BitcastedData = MIB.buildBitcast(NewDataTy, DstReg);
+ MIB.buildStore(BitcastedData, PtrReg, *NewMMO);
+ }
MI.eraseFromParent();
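
With this change the same hook handles G_STORE symmetrically: the value is bitcast to the byte-element vector first and then stored through the narrowed memory operand, so an align-2 <vscale x 2 x s32> store becomes a bitcast to <vscale x 8 x s8> plus a G_STORE of that vector. A hedged sketch of the store side, assuming NewDataTy and NewMMO have already been computed as in the load path (the function name is illustrative, not from the patch):

// Illustrative sketch of the store half of the transform above.
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;

static void lowerMisalignedRVVStore(MachineIRBuilder &MIB, Register ValReg,
                                    Register PtrReg, LLT NewDataTy,
                                    MachineMemOperand &NewMMO) {
  // e.g. <vscale x 2 x s32> value -> <vscale x 8 x s8> bitcast, then store it.
  auto Cast = MIB.buildBitcast(NewDataTy, ValReg);
  MIB.buildStore(Cast, PtrReg, NewMMO);
}
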
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
index 66df14b6635d3..317ddb4199802 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
@@ -3,31 +3,223 @@
# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
--- |
- define void @vstore_nx1i8(ptr %pa, <vscale x 1 x i8> %b) {
+ define void @vstore_nx1i8(ptr %pa, <vscale x 1 x i8> %b) #0 {
store <vscale x 1 x i8> %b, ptr %pa, align 1
ret void
}
-
- define <vscale x 2 x i8> @vload_nx2i8(ptr %pa) {
- %va = load <vscale x 2 x i8>, ptr %pa, align 2
- ret <vscale x 2 x i8> %va
+
+ define void @vstore_nx2i8(ptr %pa, <vscale x 2 x i8> %b) #0 {
+ store <vscale x 2 x i8> %b, ptr %pa, align 2
+ ret void
+ }
+
+ define void @vstore_nx4i8(ptr %pa, <vscale x 4 x i8> %b) #0 {
+ store <vscale x 4 x i8> %b, ptr %pa, align 4
+ ret void
+ }
+
+ define void @vstore_nx8i8(ptr %pa, <vscale x 8 x i8> %b) #0 {
+ store <vscale x 8 x i8> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx16i8(ptr %pa, <vscale x 16 x i8> %b) #0 {
+ store <vscale x 16 x i8> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx32i8(ptr %pa, <vscale x 32 x i8> %b) #0 {
+ store <vscale x 32 x i8> %b, ptr %pa, align 32
+ ret void
+ }
+
+ define void @vstore_nx64i8(ptr %pa, <vscale x 64 x i8> %b) #0 {
+ store <vscale x 64 x i8> %b, ptr %pa, align 64
+ ret void
+ }
+
+ define void @vstore_nx1i16(ptr %pa, <vscale x 1 x i16> %b) #0 {
+ store <vscale x 1 x i16> %b, ptr %pa, align 2
+ ret void
+ }
+
+ define void @vstore_nx2i16(ptr %pa, <vscale x 2 x i16> %b) #0 {
+ store <vscale x 2 x i16> %b, ptr %pa, align 4
+ ret void
+ }
+
+ define void @vstore_nx4i16(ptr %pa, <vscale x 4 x i16> %b) #0 {
+ store <vscale x 4 x i16> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx8i16(ptr %pa, <vscale x 8 x i16> %b) #0 {
+ store <vscale x 8 x i16> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx16i16(ptr %pa, <vscale x 16 x i16> %b) #0 {
+ store <vscale x 16 x i16> %b, ptr %pa, align 32
+ ret void
+ }
+
+ define void @vstore_nx32i16(ptr %pa, <vscale x 32 x i16> %b) #0 {
+ store <vscale x 32 x i16> %b, ptr %pa, align 64
+ ret void
+ }
+
+ define void @vstore_nx1i32(ptr %pa, <vscale x 1 x i32> %b) #0 {
+ store <vscale x 1 x i32> %b, ptr %pa, align 4
+ ret void
+ }
+
+ define void @vstore_nx2i32(ptr %pa, <vscale x 2 x i32> %b) #0 {
+ store <vscale x 2 x i32> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx4i32(ptr %pa, <vscale x 4 x i32> %b) #0 {
+ store <vscale x 4 x i32> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx8i32(ptr %pa, <vscale x 8 x i32> %b) #0 {
+ store <vscale x 8 x i32> %b, ptr %pa, align 32
+ ret void
+ }
+
+ define void @vstore_nx16i32(ptr %pa, <vscale x 16 x i32> %b) #0 {
+ store <vscale x 16 x i32> %b, ptr %pa, align 64
+ ret void
+ }
+
+ define void @vstore_nx1i64(ptr %pa, <vscale x 1 x i64> %b) #0 {
+ store <vscale x 1 x i64> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx2i64(ptr %pa, <vscale x 2 x i64> %b) #0 {
+ store <vscale x 2 x i64> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx4i64(ptr %pa, <vscale x 4 x i64> %b) #0 {
+ store <vscale x 4 x i64> %b, ptr %pa, align 32
+ ret void
+ }
+
+ define void @vstore_nx8i64(ptr %pa, <vscale x 8 x i64> %b) #0 {
+ store <vscale x 8 x i64> %b, ptr %pa, align 64
+ ret void
+ }
+
+ define void @vstore_nx16i8_align1(ptr %pa, <vscale x 16 x i8> %b) #0 {
+ store <vscale x 16 x i8> %b, ptr %pa, align 1
+ ret void
+ }
+
+ define void @vstore_nx16i8_align2(ptr %pa, <vscale x 16 x i8> %b) #0 {
+ store <vscale x 16 x i8> %b, ptr %pa, align 2
+ ret void
+ }
+
+ define void @vstore_nx16i8_align16(ptr %pa, <vscale x 16 x i8> %b) #0 {
+ store <vscale x 16 x i8> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx16i8_align64(ptr %pa, <vscale x 16 x i8> %b) #0 {
+ store <vscale x 16 x i8> %b, ptr %pa, align 64
+ ret void
+ }
+
+ define void @vstore_nx4i16_align1(ptr %pa, <vscale x 4 x i16> %b) #0 {
+ store <vscale x 4 x i16> %b, ptr %pa, align 1
+ ret void
+ }
+
+ define void @vstore_nx4i16_align2(ptr %pa, <vscale x 4 x i16> %b) #0 {
+ store <vscale x 4 x i16> %b, ptr %pa, align 2
+ ret void
+ }
+
+ define void @vstore_nx4i16_align4(ptr %pa, <vscale x 4 x i16> %b) #0 {
+ store <vscale x 4 x i16> %b, ptr %pa, align 4
+ ret void
+ }
+
+ define void @vstore_nx4i16_align8(ptr %pa, <vscale x 4 x i16> %b) #0 {
+ store <vscale x 4 x i16> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx4i16_align16(ptr %pa, <vscale x 4 x i16> %b) #0 {
+ store <vscale x 4 x i16> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx2i32_align2(ptr %pa, <vscale x 2 x i32> %b) #0 {
+ store <vscale x 2 x i32> %b, ptr %pa, align 2
+ ret void
+ }
+
+ define void @vstore_nx2i32_align4(ptr %pa, <vscale x 2 x i32> %b) #0 {
+ store <vscale x 2 x i32> %b, ptr %pa, align 4
+ ret void
+ }
+
+ define void @vstore_nx2i32_align8(ptr %pa, <vscale x 2 x i32> %b) #0 {
+ store <vscale x 2 x i32> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx2i32_align16(ptr %pa, <vscale x 2 x i32> %b) #0 {
+ store <vscale x 2 x i32> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx2i32_align256(ptr %pa, <vscale x 2 x i32> %b) #0 {
+ store <vscale x 2 x i32> %b, ptr %pa, align 256
+ ret void
+ }
+
+ define void @vstore_nx2i64_align4(ptr %pa, <vscale x 2 x i64> %b) #0 {
+ store <vscale x 2 x i64> %b, ptr %pa, align 4
+ ret void
+ }
+
+ define void @vstore_nx2i64_align8(ptr %pa, <vscale x 2 x i64> %b) #0 {
+ store <vscale x 2 x i64> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx2i64_align16(ptr %pa, <vscale x 2 x i64> %b) #0 {
+ store <vscale x 2 x i64> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx2i64_align32(ptr %pa, <vscale x 2 x i64> %b) #0 {
+ store <vscale x 2 x i64> %b, ptr %pa, align 32
+ ret void
}
-
- define <vscale x 4 x i8> @vload_nx4i8(ptr %pa) {
- %va = load <vscale x 4 x i8>, ptr %pa, align 4
- ret <vscale x 4 x i8> %va
+
+ define void @vstore_nx1ptr(ptr %pa, <vscale x 1 x ptr> %b) #0 {
+ store <vscale x 1 x ptr> %b, ptr %pa, align 4
+ ret void
}
-
- define <vscale x 8 x i8> @vload_nx8i8(ptr %pa) {
- %va = load <vscale x 8 x i8>, ptr %pa, align 8
- ret <vscale x 8 x i8> %va
+
+ define void @vstore_nx2ptr(ptr %pa, <vscale x 2 x ptr> %b) #0 {
+ store <vscale x 2 x ptr> %b, ptr %pa, align 8
+ ret void
}
-
- define <vscale x 16 x i8> @vload_nx16i8(ptr %pa) {
- %va = load <vscale x 16 x i8>, ptr %pa, align 16
- ret <vscale x 16 x i8> %va
+
+ define void @vstore_nx8ptr(ptr %pa, <vscale x 8 x ptr> %b) #0 {
+ store <vscale x 8 x ptr> %b, ptr %pa, align 32
+ ret void
}
-
+
+ attributes #0 = { "target-features"="+v" }
+
...
---
name: vstore_nx1i8
@@ -49,50 +241,803 @@ body: |
...
---
-name: vload_nx2i8
+name: vstore_nx2i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx2i8
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s8>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s8>), %0(p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx4i8
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s8>) = COPY $v8
+ G_STORE %1(<vscale x 4 x s8>), %0(p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx8i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx8i8
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s8>) = COPY $v8
+ G_STORE %1(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx16i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; CHECK-LABEL: name: vstore_nx16i8
+ ; CHECK: liveins: $x10, $v8m2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = COPY $v8m2
+ G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx32i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m4
+
+ ; CHECK-LABEL: name: vstore_nx32i8
+ ; CHECK: liveins: $x10, $v8m4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 32 x s8>) = COPY $v8m4
+ G_STORE %1(<vscale x 32 x s8>), %0(p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx64i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m8
+
+ ; CHECK-LABEL: name: vstore_nx64i8
+ ; CHECK: liveins: $x10, $v8m8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 64 x s8>) = COPY $v8m8
+ G_STORE %1(<vscale x 64 x s8>), %0(p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx1i16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx1i16
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 1 x s16>), %0(p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx2i16
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s16>), %0(p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx4i16
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx8i16
body: |
bb.1 (%ir-block.0):
- liveins: $x10
-
+ liveins: $x10, $v8m2
+
+ ; CHECK-LABEL: name: vstore_nx8i16
+ ; CHECK: liveins: $x10, $v8m2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
- $v8 = COPY %1(<vscale x 2 x s8>)
- PseudoRET implicit $v8
+ %1:_(<vscale x 8 x s16>) = COPY $v8m2
+ G_STORE %1(<vscale x 8 x s16>), %0(p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
+ PseudoRET
...
---
-name: vload_nx4i8
+name: vstore_nx16i16
body: |
bb.1 (%ir-block.0):
- liveins: $x10
-
+ liveins: $x10, $v8m4
+
+ ; CHECK-LABEL: name: vstore_nx16i16
+ ; CHECK: liveins: $x10, $v8m4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
- %1:_(<vscale x 4 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
- $v8 = COPY %1(<vscale x 4 x s8>)
- PseudoRET implicit $v8
+ %1:_(<vscale x 16 x s16>) = COPY $v8m4
+ G_STORE %1(<vscale x 16 x s16>), %0(p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
+ PseudoRET
...
---
-name: vload_nx8i8
+name: vstore_nx32i16
body: |
bb.1 (%ir-block.0):
- liveins: $x10
-
+ liveins: $x10, $v8m8
+
+ ; CHECK-LABEL: name: vstore_nx32i16
+ ; CHECK: liveins: $x10, $v8m8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
- %1:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
- $v8 = COPY %1(<vscale x 8 x s8>)
- PseudoRET implicit $v8
+ %1:_(<vscale x 32 x s16>) = COPY $v8m8
+ G_STORE %1(<vscale x 32 x s16>), %0(p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
+ PseudoRET
...
---
-name: vload_nx16i8
+name: vstore_nx1i32
body: |
bb.1 (%ir-block.0):
- liveins: $x10
-
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx1i32
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
- %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
- $v8m2 = COPY %1(<vscale x 16 x s8>)
- PseudoRET implicit $v8m2
+ %1:_(<vscale x 1 x s32>) = COPY $v8
+ G_STORE %1(<vscale x 1 x s32>), %0(p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i32
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx2i32
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i32
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; CHECK-LABEL: name: vstore_nx4i32
+ ; CHECK: liveins: $x10, $v8m2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s32>) = COPY $v8m2
+ G_STORE %1(<vscale x 4 x s32>), %0(p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx8i32
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m4
+
+ ; CHECK-LABEL: name: vstore_nx8i32
+ ; CHECK: liveins: $x10, $v8m4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s32>) = COPY $v8m4
+ G_STORE %1(<vscale x 8 x s32>), %0(p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx16i32
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m8
+
+ ; CHECK-LABEL: name: vstore_nx16i32
+ ; CHECK: liveins: $x10, $v8m8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s32>) = COPY $v8m8
+ G_STORE %1(<vscale x 16 x s32>), %0(p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx1i64
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx1i64
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s64>) = COPY $v8
+ G_STORE %1(<vscale x 1 x s64>), %0(p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i64
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; CHECK-LABEL: name: vstore_nx2i64
+ ; CHECK: liveins: $x10, $v8m2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = COPY $v8m2
+ G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i64
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m4
+
+ ; CHECK-LABEL: name: vstore_nx4i64
+ ; CHECK: liveins: $x10, $v8m4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s64>) = COPY $v8m4
+ G_STORE %1(<vscale x 4 x s64>), %0(p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx8i64
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m8
+
+ ; CHECK-LABEL: name: vstore_nx8i64
+ ; CHECK: liveins: $x10, $v8m8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s64>) = COPY $v8m8
+ G_STORE %1(<vscale x 8 x s64>), %0(p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx16i8_align1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; CHECK-LABEL: name: vstore_nx16i8_align1
+ ; CHECK: liveins: $x10, $v8m2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = COPY $v8m2
+ G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
+ PseudoRET
+
+...
+---
+name: vstore_nx16i8_align2
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; CHECK-LABEL: name: vstore_nx16i8_align2
+ ; CHECK: liveins: $x10, $v8m2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = COPY $v8m2
+ G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
+ PseudoRET
+
+...
+---
+name: vstore_nx16i8_align16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; CHECK-LABEL: name: vstore_nx16i8_align16
+ ; CHECK: liveins: $x10, $v8m2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = COPY $v8m2
+ G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx16i8_align64
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; CHECK-LABEL: name: vstore_nx16i8_align64
+ ; CHECK: liveins: $x10, $v8m2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = COPY $v8m2
+ G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i16_align1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx4i16_align1
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 1)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i16_align2
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx4i16_align2
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i16_align4
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx4i16_align4
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i16_align8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx4i16_align8
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i16_align16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx4i16_align16
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i32_align2
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx2i32_align2
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 2)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i32_align4
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx2i32_align4
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i32_align8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx2i32_align8
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i32_align16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx2i32_align16
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i32_align256
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx2i32_align256
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i64_align4
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; CHECK-LABEL: name: vstore_nx2i64_align4
+ ; CHECK: liveins: $x10, $v8m2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 16 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = COPY $v8m2
+ G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 4)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i64_align8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; CHECK-LABEL: name: vstore_nx2i64_align8
+ ; CHECK: liveins: $x10, $v8m2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = COPY $v8m2
+ G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i64_align16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; CHECK-LABEL: name: vstore_nx2i64_align16
+ ; CHECK: liveins: $x10, $v8m2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = COPY $v8m2
+ G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i64_align32
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; CHECK-LABEL: name: vstore_nx2i64_align32
+ ; CHECK: liveins: $x10, $v8m2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = COPY $v8m2
+ G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
+ PseudoRET
+
+...
+---
+name: vstore_nx1ptr
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx1ptr
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x p0>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x p0>) = COPY $v8
+ G_STORE %1(<vscale x 1 x p0>), %0(p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx2ptr
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx2ptr
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x p0>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x p0>) = COPY $v8
+ G_STORE %1(<vscale x 2 x p0>), %0(p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx8ptr
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m4
+
+ ; CHECK-LABEL: name: vstore_nx8ptr
+ ; CHECK: liveins: $x10, $v8m4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x p0>) = COPY $v8m4
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x p0>) = COPY $v8m4
+ G_STORE %1(<vscale x 8 x p0>), %0(p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+ PseudoRET
...
>From 7c7deed28c2af87dd9480fca73746337ae2822e0 Mon Sep 17 00:00:00 2001
From: Jiahan Xie <jx353 at cornell.edu>
Date: Sun, 21 Jul 2024 17:06:51 -0400
Subject: [PATCH 7/8] Change assertions to early returns; make other minor
 changes to pass LLVM checks
---
.../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 44 +++++++++++--------
.../Target/RISCV/GISel/RISCVLegalizerInfo.h | 4 +-
.../legalizer/rvv/legalize-load.mir | 9 ++--
.../legalizer/rvv/legalize-store.mir | 16 +++----
4 files changed, 40 insertions(+), 33 deletions(-)
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index d9543bda4c304..e0b9af75854c1 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -296,10 +296,11 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
} else if (ST.hasStdExtD()) {
LoadStoreActions.legalForTypesWithMemDesc({{s64, p0, s64, 64}});
}
- if (ST.getELen() == 64)
+ if (ST.hasVInstructions() && ST.getELen() == 64)
LoadStoreActions.legalForTypesWithMemDesc({{nxv1s8, p0, nxv1s8, 8},
{nxv1s16, p0, nxv1s16, 16},
{nxv1s32, p0, nxv1s32, 32}});
+
if (ST.hasVInstructionsI64())
LoadStoreActions.legalForTypesWithMemDesc({{nxv1s64, p0, nxv1s64, 64},
{nxv2s64, p0, nxv2s64, 64},
@@ -308,9 +309,13 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
LoadStoreActions.widenScalarToNextPow2(0, /* MinSize = */ 8)
.lowerIfMemSizeNotByteSizePow2()
- .custom();
+ .customIf([=](const LegalityQuery &Query) {
+ LLT Type = Query.Types[0];
+ return Type.isScalableVector();
+ })
+ .clampScalar(0, s32, sXLen)
+ .lower();
- LoadStoreActions.clampScalar(0, s32, sXLen).lower();
ExtLoadActions.widenScalarToNextPow2(0).clampScalar(0, s32, sXLen).lower();
getActionDefinitionsBuilder({G_PTR_ADD, G_PTRMASK}).legalFor({{p0, sXLen}});
@@ -683,24 +688,28 @@ bool RISCVLegalizerInfo::legalizeExt(MachineInstr &MI,
}
bool RISCVLegalizerInfo::legalizeLoadStore(MachineInstr &MI,
+ LegalizerHelper &Helper,
MachineIRBuilder &MIB) const {
MachineRegisterInfo &MRI = *MIB.getMRI();
- MachineFunction *MF = MI.getParent()->getParent();
+ MachineFunction *MF = MI.getMF();
const DataLayout &DL = MIB.getDataLayout();
LLVMContext &Ctx = MF->getFunction().getContext();
Register DstReg = MI.getOperand(0).getReg();
Register PtrReg = MI.getOperand(1).getReg();
LLT DataTy = MRI.getType(DstReg);
- assert(DataTy.isVector() && "Expect vector load.");
- assert(STI.hasVInstructions() &&
- (DataTy.getScalarSizeInBits() != 64 || STI.hasVInstructionsI64()) &&
- (DataTy.getElementCount().getKnownMinValue() != 1 ||
- STI.getELen() == 64) &&
- "Load type must be legal integer or floating point vector.");
-
- assert(MI.hasOneMemOperand() &&
- "Load instructions only have one MemOperand.");
+ if (!DataTy.isVector())
+ return false;
+
+ if (!(STI.hasVInstructions() &&
+ (DataTy.getScalarSizeInBits() != 64 || STI.hasVInstructionsI64()) &&
+ (DataTy.getElementCount().getKnownMinValue() != 1 ||
+ STI.getELen() == 64)))
+ return false;
+
+ if (!MI.hasOneMemOperand())
+ return false;
+
MachineMemOperand *MMO = *MI.memoperands_begin();
Align Alignment = MMO->getAlign();
@@ -724,12 +733,11 @@ bool RISCVLegalizerInfo::legalizeLoadStore(MachineInstr &MI,
MF->getMachineMemOperand(PI, MMO->getFlags(), NewDataTy, Alignment);
if (isa<GLoad>(MI)) {
- auto NewLoad = MIB.buildLoad(NewDataTy, PtrReg, *NewMMO);
- MIB.buildBitcast(DstReg, NewLoad);
+ Helper.bitcast(MI, 0, NewDataTy);
} else {
assert(isa<GStore>(MI) && "Machine instructions must be Load/Store.");
- auto BitcastedData = MIB.buildBitcast(NewDataTy, DstReg);
- MIB.buildStore(BitcastedData, PtrReg, *NewMMO);
+ Helper.bitcast(MI, 0, NewDataTy);
+ MIB.buildStore(MI.getOperand(0), PtrReg, *NewMMO);
}
MI.eraseFromParent();
@@ -916,7 +924,7 @@ bool RISCVLegalizerInfo::legalizeCustom(
return legalizeSplatVector(MI, MIRBuilder);
case TargetOpcode::G_LOAD:
case TargetOpcode::G_STORE:
- return legalizeLoadStore(MI, MIRBuilder);
+ return legalizeLoadStore(MI, Helper, MIRBuilder);
}
llvm_unreachable("expected switch to return");
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
index 0415d042c913b..2fc28615e7630 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
@@ -13,6 +13,7 @@
#ifndef LLVM_LIB_TARGET_RISCV_RISCVMACHINELEGALIZER_H
#define LLVM_LIB_TARGET_RISCV_RISCVMACHINELEGALIZER_H
+#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/Register.h"
@@ -45,7 +46,8 @@ class RISCVLegalizerInfo : public LegalizerInfo {
bool legalizeVScale(MachineInstr &MI, MachineIRBuilder &MIB) const;
bool legalizeExt(MachineInstr &MI, MachineIRBuilder &MIRBuilder) const;
bool legalizeSplatVector(MachineInstr &MI, MachineIRBuilder &MIB) const;
- bool legalizeLoadStore(MachineInstr &MI, MachineIRBuilder &MIB) const;
+ bool legalizeLoadStore(MachineInstr &MI, LegalizerHelper &Helper,
+ MachineIRBuilder &MIB) const;
};
} // end namespace llvm
#endif
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
index 12f218863e400..992daa3a22361 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
@@ -725,8 +725,7 @@ body: |
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
- ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 4 x s16>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 4 x s16>) = G_BITCAST %2:_(<vscale x 8 x s8>)
; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
@@ -821,8 +820,7 @@ body: |
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
- ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s32>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s32>) = G_BITCAST %2:_(<vscale x 8 x s8>)
; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
@@ -917,8 +915,7 @@ body: |
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
- ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s64>) = G_BITCAST [[LOAD]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s64>) = G_BITCAST %2:_(<vscale x 16 x s8>)
; CHECK-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
; CHECK-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
index 317ddb4199802..b91d25509646f 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
@@ -1007,17 +1007,17 @@ body: |
name: vstore_nx2ptr
body: |
bb.1 (%ir-block.0):
- liveins: $v8, $x10
+ liveins: $x10, $v8m2
; CHECK-LABEL: name: vstore_nx2ptr
- ; CHECK: liveins: $v8, $x10
+ ; CHECK: liveins: $x10, $v8m2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x p0>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x p0>) = COPY $v8m2
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x p0>) = COPY $v8
+ %1:_(<vscale x 2 x p0>) = COPY $v8m2
G_STORE %1(<vscale x 2 x p0>), %0(p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
PseudoRET
@@ -1026,17 +1026,17 @@ body: |
name: vstore_nx8ptr
body: |
bb.1 (%ir-block.0):
- liveins: $x10, $v8m4
+ liveins: $x10, $v8m8
; CHECK-LABEL: name: vstore_nx8ptr
- ; CHECK: liveins: $x10, $v8m4
+ ; CHECK: liveins: $x10, $v8m8
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x p0>) = COPY $v8m4
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x p0>) = COPY $v8m8
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
- %1:_(<vscale x 8 x p0>) = COPY $v8m4
+ %1:_(<vscale x 8 x p0>) = COPY $v8m8
G_STORE %1(<vscale x 8 x p0>), %0(p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
PseudoRET
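
One mechanism worth spelling out for the patch above: the generic LegalizerHelper can already retype the memory access of a G_LOAD/G_STORE in place. As far as I can tell from the generic code, Helper.bitcast(MI, 0, CastTy) changes the MachineMemOperand type to CastTy and inserts the matching G_BITCAST around the original instruction (bailing out with UnableToLegalize when the sizes do not line up), which is why the original G_LOAD/G_STORE is kept rather than erased. A minimal sketch of delegating to it, as an illustration rather than the exact code in this series:

  // Illustrative sketch: retype a vector G_LOAD/G_STORE's memory access to
  // CastTy via the generic helper; the helper inserts the G_BITCAST itself.
  #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
  #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
  using namespace llvm;

  static bool bitcastMemType(MachineInstr &MI, LegalizerHelper &Helper,
                             LLT CastTy) {
    assert((isa<GLoad>(MI) || isa<GStore>(MI)) &&
           "Machine instruction must be a load or store.");
    // Type index 0 is the loaded/stored value for both opcodes.
    return Helper.bitcast(MI, /*TypeIdx=*/0, CastTy) ==
           LegalizerHelper::Legalized;
  }
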
>From 7501c8cd7f65ed09fdc1feab5c7ea03d5d1acddd Mon Sep 17 00:00:00 2001
From: Jiahan Xie <jx353 at cornell.edu>
Date: Mon, 22 Jul 2024 08:47:17 -0400
Subject: [PATCH 8/8] remove unwanted erase
---
llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 2 --
.../RISCV/GlobalISel/legalizer/rvv/legalize-load.mir | 9 ++++++---
.../RISCV/GlobalISel/legalizer/rvv/legalize-store.mir | 3 +++
3 files changed, 9 insertions(+), 5 deletions(-)
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index e0b9af75854c1..bc72f874a7c7d 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -740,8 +740,6 @@ bool RISCVLegalizerInfo::legalizeLoadStore(MachineInstr &MI,
MIB.buildStore(MI.getOperand(0), PtrReg, *NewMMO);
}
- MI.eraseFromParent();
-
return true;
}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
index 992daa3a22361..12f218863e400 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
@@ -725,7 +725,8 @@ body: |
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 4 x s16>) = G_BITCAST %2:_(<vscale x 8 x s8>)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 4 x s16>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
@@ -820,7 +821,8 @@ body: |
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s32>) = G_BITCAST %2:_(<vscale x 8 x s8>)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s32>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
@@ -915,7 +917,8 @@ body: |
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s64>) = G_BITCAST %2:_(<vscale x 16 x s8>)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s64>) = G_BITCAST [[LOAD]](<vscale x 16 x s8>)
; CHECK-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
; CHECK-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
index b91d25509646f..79bbc49ce181e 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
@@ -728,6 +728,7 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 4 x s16>)
; CHECK-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
+ ; CHECK-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s16>) = COPY $v8
@@ -824,6 +825,7 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s32>)
; CHECK-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
+ ; CHECK-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s32>) = COPY $v8
@@ -920,6 +922,7 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 16 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s64>)
; CHECK-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
+ ; CHECK-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s64>) = COPY $v8m2
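
For anyone regenerating the expectations in these rvv legalizer tests locally, the CHECK lines are normally produced by the update script rather than edited by hand. A typical invocation from the source root, assuming llc is discoverable and that build/ is the local build tree:

  # Regenerate the CHECK lines for the store test.
  python3 llvm/utils/update_mir_test_checks.py \
      llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir

  # Run just this test through lit.
  build/bin/llvm-lit -v \
      llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
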