[llvm] 5e32e79 - [MIPS GlobalISel] Legalize non-power-of-2 and unaligned load and store
Petar Avramovic via llvm-commits
llvm-commits at lists.llvm.org
Wed Feb 19 03:03:15 PST 2020
Author: Petar Avramovic
Date: 2020-02-19T12:02:27+01:00
New Revision: 5e32e7981b3a099e9b12b3b83b0eae0460095966
URL: https://github.com/llvm/llvm-project/commit/5e32e7981b3a099e9b12b3b83b0eae0460095966
DIFF: https://github.com/llvm/llvm-project/commit/5e32e7981b3a099e9b12b3b83b0eae0460095966.diff
LOG: [MIPS GlobalISel] Legalize non-power-of-2 and unaligned load and store
Custom legalize non-power-of-2 and unaligned load and store for MIPS32r5
and older, custom legalize non-power-of-2 load and store for MIPS32r6.
Don't attempt to combine non-power-of-2 or unaligned loads when the
subtarget doesn't support them (MIPS32r5 and older).
Differential Revision: https://reviews.llvm.org/D74625
Added:
llvm/test/CodeGen/Mips/GlobalISel/legalizer/load_split_because_of_memsize_or_align.mir
llvm/test/CodeGen/Mips/GlobalISel/legalizer/store_split_because_of_memsize_or_align.mir
llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_split_because_of_memsize_or_align.ll
llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/store_split_because_of_memsize_or_align.ll
Modified:
llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
index 09498952bcf8..eaa699ede32f 100644
--- a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
@@ -119,6 +119,33 @@ MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) {
return true;
return false;
})
+ // Custom lower scalar memory access, up to 8 bytes, for:
+ // - non-power-of-2 MemSizes
+ // - unaligned 2 or 8 byte MemSizes for MIPS32r5 and older
+ .customIf([=, &ST](const LegalityQuery &Query) {
+ if (!Query.Types[0].isScalar() || Query.Types[1] != p0 ||
+ Query.Types[0] == s1)
+ return false;
+
+ unsigned Size = Query.Types[0].getSizeInBits();
+ unsigned QueryMemSize = Query.MMODescrs[0].SizeInBits;
+ assert(QueryMemSize <= Size && "Scalar can't hold MemSize");
+
+ if (Size > 64 || QueryMemSize > 64)
+ return false;
+
+ if (!isPowerOf2_64(Query.MMODescrs[0].SizeInBits))
+ return true;
+
+ if (!ST.systemSupportsUnalignedAccess() &&
+ isUnalignedMemmoryAccess(QueryMemSize,
+ Query.MMODescrs[0].AlignInBits)) {
+ assert(QueryMemSize != 32 && "4 byte load and store are legal");
+ return true;
+ }
+
+ return false;
+ })
.minScalar(0, s32);
getActionDefinitionsBuilder(G_IMPLICIT_DEF)
@@ -135,7 +162,7 @@ MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) {
{s32, p0, 16, 8}})
.clampScalar(0, s32, s32);
- getActionDefinitionsBuilder({G_ZEXT, G_SEXT})
+ getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
.legalIf([](const LegalityQuery &Query) { return false; })
.maxScalar(0, s32);
@@ -311,6 +338,87 @@ bool MipsLegalizerInfo::legalizeCustom(MachineInstr &MI,
const LLT s64 = LLT::scalar(64);
switch (MI.getOpcode()) {
+ case G_LOAD:
+ case G_STORE: {
+ unsigned MemSize = (**MI.memoperands_begin()).getSize();
+ Register Val = MI.getOperand(0).getReg();
+ unsigned Size = MRI.getType(Val).getSizeInBits();
+
+ MachineMemOperand *MMOBase = *MI.memoperands_begin();
+
+ assert(MemSize <= 8 && "MemSize is too large");
+ assert(Size <= 64 && "Scalar size is too large");
+
+ // Split MemSize into two, P2HalfMemSize is the largest power of two smaller
+ // than MemSize. e.g. 8 = 4 + 4, 6 = 4 + 2, 3 = 2 + 1.
+ unsigned P2HalfMemSize, RemMemSize;
+ if (isPowerOf2_64(MemSize)) {
+ P2HalfMemSize = RemMemSize = MemSize / 2;
+ } else {
+ P2HalfMemSize = 1 << Log2_32(MemSize);
+ RemMemSize = MemSize - P2HalfMemSize;
+ }
+
+ Register BaseAddr = MI.getOperand(1).getReg();
+ LLT PtrTy = MRI.getType(BaseAddr);
+ MachineFunction &MF = MIRBuilder.getMF();
+
+ auto P2HalfMemOp = MF.getMachineMemOperand(MMOBase, 0, P2HalfMemSize);
+ auto RemMemOp = MF.getMachineMemOperand(MMOBase, P2HalfMemSize, RemMemSize);
+
+ if (MI.getOpcode() == G_STORE) {
+ // Widen Val to s32 or s64 in order to create legal G_LSHR or G_UNMERGE.
+ if (Size < 32)
+ Val = MIRBuilder.buildAnyExt(s32, Val).getReg(0);
+ if (Size > 32 && Size < 64)
+ Val = MIRBuilder.buildAnyExt(s64, Val).getReg(0);
+
+ auto C_P2HalfMemSize = MIRBuilder.buildConstant(s32, P2HalfMemSize);
+ auto Addr = MIRBuilder.buildPtrAdd(PtrTy, BaseAddr, C_P2HalfMemSize);
+
+ if (MI.getOpcode() == G_STORE && MemSize <= 4) {
+ MIRBuilder.buildStore(Val, BaseAddr, *P2HalfMemOp);
+ auto C_P2Half_InBits = MIRBuilder.buildConstant(s32, P2HalfMemSize * 8);
+ auto Shift = MIRBuilder.buildLShr(s32, Val, C_P2Half_InBits);
+ MIRBuilder.buildStore(Shift, Addr, *RemMemOp);
+ } else {
+ auto Unmerge = MIRBuilder.buildUnmerge(s32, Val);
+ MIRBuilder.buildStore(Unmerge.getReg(0), BaseAddr, *P2HalfMemOp);
+ MIRBuilder.buildStore(Unmerge.getReg(1), Addr, *RemMemOp);
+ }
+ }
+
+ if (MI.getOpcode() == G_LOAD) {
+
+ if (MemSize <= 4) {
+ // This is an anyextending load; use a 4-byte lwr/lwl.
+ auto *Load4MMO = MF.getMachineMemOperand(MMOBase, 0, 4);
+
+ if (Size == 32)
+ MIRBuilder.buildLoad(Val, BaseAddr, *Load4MMO);
+ else {
+ auto Load = MIRBuilder.buildLoad(s32, BaseAddr, *Load4MMO);
+ MIRBuilder.buildTrunc(Val, Load.getReg(0));
+ }
+
+ } else {
+ auto C_P2HalfMemSize = MIRBuilder.buildConstant(s32, P2HalfMemSize);
+ auto Addr = MIRBuilder.buildPtrAdd(PtrTy, BaseAddr, C_P2HalfMemSize);
+
+ auto Load_P2Half = MIRBuilder.buildLoad(s32, BaseAddr, *P2HalfMemOp);
+ auto Load_Rem = MIRBuilder.buildLoad(s32, Addr, *RemMemOp);
+
+ if (Size == 64)
+ MIRBuilder.buildMerge(Val, {Load_P2Half, Load_Rem});
+ else {
+ auto Merge = MIRBuilder.buildMerge(s64, {Load_P2Half, Load_Rem});
+ MIRBuilder.buildTrunc(Val, Merge);
+ }
+ }
+ }
+ MI.eraseFromParent();
+ break;
+ }
case G_UITOFP: {
Register Dst = MI.getOperand(0).getReg();
Register Src = MI.getOperand(1).getReg();
diff --git a/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp b/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
index f9d93ca29658..f288e734d9ff 100644
--- a/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
@@ -44,9 +44,22 @@ bool MipsPreLegalizerCombinerInfo::combine(GISelChangeObserver &Observer,
return false;
case TargetOpcode::G_LOAD:
case TargetOpcode::G_SEXTLOAD:
- case TargetOpcode::G_ZEXTLOAD:
+ case TargetOpcode::G_ZEXTLOAD: {
+ // Don't attempt to combine non power of 2 loads or unaligned loads when
+ // subtarget doesn't support them.
+ auto MMO = *MI.memoperands_begin();
+ const MipsSubtarget &STI =
+ static_cast<const MipsSubtarget &>(MI.getMF()->getSubtarget());
+ if (!isPowerOf2_64(MMO->getSize()))
+ return false;
+ bool isUnaligned = MMO->getSize() > MMO->getAlignment();
+ if (!STI.systemSupportsUnalignedAccess() && isUnaligned)
+ return false;
+
return Helper.tryCombineExtendingLoads(MI);
}
+ }
+
return false;
}
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/load_split_because_of_memsize_or_align.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/load_split_because_of_memsize_or_align.mir
new file mode 100644
index 000000000000..69bae745a35b
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/load_split_because_of_memsize_or_align.mir
@@ -0,0 +1,1146 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
+# RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=legalizer -mcpu=mips32r6 -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32R6
+--- |
+
+ %struct.MemSize3_Align1 = type { [3 x i8], i8 }
+ %struct.MemSize3_Align2 = type { [3 x i8], i8 }
+ %struct.MemSize3_Align4 = type { [3 x i8], i8 }
+ %struct.MemSize3_Align8 = type { [3 x i8], i8, [4 x i8] }
+ %struct.MemSize5_Align1 = type <{ [5 x i8], i16, i8 }>
+ %struct.MemSize5_Align2 = type <{ [5 x i8], i16, i8 }>
+ %struct.MemSize5_Align4 = type <{ [5 x i8], i16, i8 }>
+ %struct.MemSize5_Align8 = type <{ [5 x i8], i16, i8 }>
+ %struct.MemSize6_Align1 = type { [6 x i8], i16 }
+ %struct.MemSize6_Align2 = type { [6 x i8], i16 }
+ %struct.MemSize6_Align4 = type { [6 x i8], i16 }
+ %struct.MemSize6_Align8 = type { [6 x i8], i16 }
+ %struct.MemSize7_Align1 = type { [7 x i8], i8 }
+ %struct.MemSize7_Align2 = type { [7 x i8], i8 }
+ %struct.MemSize7_Align4 = type { [7 x i8], i8 }
+ %struct.MemSize7_Align8 = type { [7 x i8], i8 }
+
+ @double_align1 = common global double 0.000000e+00, align 1
+ @double_align2 = common global double 0.000000e+00, align 2
+ @double_align4 = common global double 0.000000e+00, align 4
+ @double_align8 = common global double 0.000000e+00, align 8
+ @i64_align1 = common global i64 0, align 1
+ @i64_align2 = common global i64 0, align 2
+ @i64_align4 = common global i64 0, align 4
+ @i64_align8 = common global i64 0, align 8
+
+ define i32 @load3align1(%struct.MemSize3_Align1* %S) {
+ entry:
+ %0 = bitcast %struct.MemSize3_Align1* %S to i24*
+ %bf.load = load i24, i24* %0, align 1
+ %bf.cast = zext i24 %bf.load to i32
+ ret i32 %bf.cast
+ }
+
+ define i32 @load3align2(%struct.MemSize3_Align2* %S) {
+ entry:
+ %0 = bitcast %struct.MemSize3_Align2* %S to i24*
+ %bf.load = load i24, i24* %0, align 2
+ %bf.cast = zext i24 %bf.load to i32
+ ret i32 %bf.cast
+ }
+
+ define i32 @load3align4(%struct.MemSize3_Align4* %S, i32 signext %a) {
+ entry:
+ %0 = bitcast %struct.MemSize3_Align4* %S to i24*
+ %bf.load = load i24, i24* %0, align 4
+ %bf.cast = zext i24 %bf.load to i32
+ ret i32 %bf.cast
+ }
+
+ define i32 @load3align8(%struct.MemSize3_Align8* %S, i32 signext %a) {
+ entry:
+ %0 = bitcast %struct.MemSize3_Align8* %S to i24*
+ %bf.load = load i24, i24* %0, align 8
+ %bf.cast = zext i24 %bf.load to i32
+ ret i32 %bf.cast
+ }
+
+ define i64 @load5align1(%struct.MemSize5_Align1* %S) {
+ entry:
+ %0 = bitcast %struct.MemSize5_Align1* %S to i40*
+ %bf.load = load i40, i40* %0, align 1
+ %bf.cast = zext i40 %bf.load to i64
+ ret i64 %bf.cast
+ }
+
+ define i64 @load5align2(%struct.MemSize5_Align2* %S) {
+ entry:
+ %0 = bitcast %struct.MemSize5_Align2* %S to i40*
+ %bf.load = load i40, i40* %0, align 2
+ %bf.cast = zext i40 %bf.load to i64
+ ret i64 %bf.cast
+ }
+
+ define i64 @load5align4(%struct.MemSize5_Align4* %S) {
+ entry:
+ %0 = bitcast %struct.MemSize5_Align4* %S to i40*
+ %bf.load = load i40, i40* %0, align 4
+ %bf.cast = zext i40 %bf.load to i64
+ ret i64 %bf.cast
+ }
+
+ define i64 @load5align8(%struct.MemSize5_Align8* %S) {
+ entry:
+ %0 = bitcast %struct.MemSize5_Align8* %S to i40*
+ %bf.load = load i40, i40* %0, align 8
+ %bf.cast = zext i40 %bf.load to i64
+ ret i64 %bf.cast
+ }
+
+ define i64 @load6align1(%struct.MemSize6_Align1* %S) {
+ entry:
+ %0 = bitcast %struct.MemSize6_Align1* %S to i48*
+ %bf.load = load i48, i48* %0, align 1
+ %bf.cast = zext i48 %bf.load to i64
+ ret i64 %bf.cast
+ }
+
+ define i64 @load6align2(%struct.MemSize6_Align2* %S) {
+ entry:
+ %0 = bitcast %struct.MemSize6_Align2* %S to i48*
+ %bf.load = load i48, i48* %0, align 2
+ %bf.cast = zext i48 %bf.load to i64
+ ret i64 %bf.cast
+ }
+
+ define i64 @load6align4(%struct.MemSize6_Align4* %S) {
+ entry:
+ %0 = bitcast %struct.MemSize6_Align4* %S to i48*
+ %bf.load = load i48, i48* %0, align 4
+ %bf.cast = zext i48 %bf.load to i64
+ ret i64 %bf.cast
+ }
+
+ define i64 @load6align8(%struct.MemSize6_Align8* %S) {
+ entry:
+ %0 = bitcast %struct.MemSize6_Align8* %S to i48*
+ %bf.load = load i48, i48* %0, align 8
+ %bf.cast = zext i48 %bf.load to i64
+ ret i64 %bf.cast
+ }
+
+ define i64 @load7align1(%struct.MemSize7_Align1* %S) {
+ entry:
+ %0 = bitcast %struct.MemSize7_Align1* %S to i56*
+ %bf.load = load i56, i56* %0, align 1
+ %bf.cast = zext i56 %bf.load to i64
+ ret i64 %bf.cast
+ }
+
+ define i64 @load7align2(%struct.MemSize7_Align2* %S) {
+ entry:
+ %0 = bitcast %struct.MemSize7_Align2* %S to i56*
+ %bf.load = load i56, i56* %0, align 2
+ %bf.cast = zext i56 %bf.load to i64
+ ret i64 %bf.cast
+ }
+
+ define i64 @load7align4(%struct.MemSize7_Align4* %S) {
+ entry:
+ %0 = bitcast %struct.MemSize7_Align4* %S to i56*
+ %bf.load = load i56, i56* %0, align 4
+ %bf.cast = zext i56 %bf.load to i64
+ ret i64 %bf.cast
+ }
+
+ define i64 @load7align8(%struct.MemSize7_Align8* %S) {
+ entry:
+ %0 = bitcast %struct.MemSize7_Align8* %S to i56*
+ %bf.load = load i56, i56* %0, align 8
+ %bf.cast = zext i56 %bf.load to i64
+ ret i64 %bf.cast
+ }
+
+ define double @load_double_align1() {
+ entry:
+ %0 = load double, double* @double_align1, align 1
+ ret double %0
+ }
+
+ define double @load_double_align2() {
+ entry:
+ %0 = load double, double* @double_align2, align 2
+ ret double %0
+ }
+
+ define double @load_double_align4() {
+ entry:
+ %0 = load double, double* @double_align4, align 4
+ ret double %0
+ }
+
+ define double @load_double_align8() {
+ entry:
+ %0 = load double, double* @double_align8, align 8
+ ret double %0
+ }
+
+ define i64 @load_i64_align1() {
+ entry:
+ %0 = load i64, i64* @i64_align1, align 1
+ ret i64 %0
+ }
+
+ define i64 @load_i64_align2() {
+ entry:
+ %0 = load i64, i64* @i64_align2, align 2
+ ret i64 %0
+ }
+
+ define i64 @load_i64_align4() {
+ entry:
+ %0 = load i64, i64* @i64_align4, align 4
+ ret i64 %0
+ }
+
+ define i64 @load_i64_align8() {
+ entry:
+ %0 = load i64, i64* @i64_align8, align 8
+ ret i64 %0
+ }
+
+...
+---
+name: load3align1
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0
+
+ ; MIPS32-LABEL: name: load3align1
+ ; MIPS32: liveins: $a0
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0, align 1)
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
+ ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32)
+ ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
+ ; MIPS32: $v0 = COPY [[AND]](s32)
+ ; MIPS32: RetRA implicit $v0
+ ; MIPS32R6-LABEL: name: load3align1
+ ; MIPS32R6: liveins: $a0
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0, align 1)
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
+ ; MIPS32R6: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32)
+ ; MIPS32R6: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
+ ; MIPS32R6: $v0 = COPY [[AND]](s32)
+ ; MIPS32R6: RetRA implicit $v0
+ %0:_(p0) = COPY $a0
+ %1:_(s24) = G_LOAD %0(p0) :: (load 3 from %ir.0, align 1)
+ %2:_(s32) = G_ZEXT %1(s24)
+ $v0 = COPY %2(s32)
+ RetRA implicit $v0
+
+...
+---
+name: load3align2
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0
+
+ ; MIPS32-LABEL: name: load3align2
+ ; MIPS32: liveins: $a0
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0, align 2)
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
+ ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32)
+ ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
+ ; MIPS32: $v0 = COPY [[AND]](s32)
+ ; MIPS32: RetRA implicit $v0
+ ; MIPS32R6-LABEL: name: load3align2
+ ; MIPS32R6: liveins: $a0
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0, align 2)
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
+ ; MIPS32R6: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32)
+ ; MIPS32R6: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
+ ; MIPS32R6: $v0 = COPY [[AND]](s32)
+ ; MIPS32R6: RetRA implicit $v0
+ %0:_(p0) = COPY $a0
+ %1:_(s24) = G_LOAD %0(p0) :: (load 3 from %ir.0, align 2)
+ %2:_(s32) = G_ZEXT %1(s24)
+ $v0 = COPY %2(s32)
+ RetRA implicit $v0
+
+...
+---
+name: load3align4
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0, $a1
+
+ ; MIPS32-LABEL: name: load3align4
+ ; MIPS32: liveins: $a0, $a1
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0)
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
+ ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32)
+ ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
+ ; MIPS32: $v0 = COPY [[AND]](s32)
+ ; MIPS32: RetRA implicit $v0
+ ; MIPS32R6-LABEL: name: load3align4
+ ; MIPS32R6: liveins: $a0, $a1
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0)
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
+ ; MIPS32R6: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32)
+ ; MIPS32R6: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
+ ; MIPS32R6: $v0 = COPY [[AND]](s32)
+ ; MIPS32R6: RetRA implicit $v0
+ %0:_(p0) = COPY $a0
+ %2:_(s24) = G_LOAD %0(p0) :: (load 3 from %ir.0, align 4)
+ %3:_(s32) = G_ZEXT %2(s24)
+ $v0 = COPY %3(s32)
+ RetRA implicit $v0
+
+...
+---
+name: load3align8
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0, $a1
+
+ ; MIPS32-LABEL: name: load3align8
+ ; MIPS32: liveins: $a0, $a1
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0, align 8)
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
+ ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32)
+ ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
+ ; MIPS32: $v0 = COPY [[AND]](s32)
+ ; MIPS32: RetRA implicit $v0
+ ; MIPS32R6-LABEL: name: load3align8
+ ; MIPS32R6: liveins: $a0, $a1
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0, align 8)
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
+ ; MIPS32R6: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32)
+ ; MIPS32R6: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
+ ; MIPS32R6: $v0 = COPY [[AND]](s32)
+ ; MIPS32R6: RetRA implicit $v0
+ %0:_(p0) = COPY $a0
+ %2:_(s24) = G_LOAD %0(p0) :: (load 3 from %ir.0, align 8)
+ %3:_(s32) = G_ZEXT %2(s24)
+ $v0 = COPY %3(s32)
+ RetRA implicit $v0
+
+...
+---
+name: load5align1
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0
+
+ ; MIPS32-LABEL: name: load5align1
+ ; MIPS32: liveins: $a0
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0, align 1)
+ ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from %ir.0 + 4)
+ ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
+ ; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LOAD1]], [[C2]]
+ ; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ ; MIPS32: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
+ ; MIPS32: $v0 = COPY [[UV]](s32)
+ ; MIPS32: $v1 = COPY [[UV1]](s32)
+ ; MIPS32: RetRA implicit $v0, implicit $v1
+ ; MIPS32R6-LABEL: name: load5align1
+ ; MIPS32R6: liveins: $a0
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0, align 1)
+ ; MIPS32R6: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from %ir.0 + 4)
+ ; MIPS32R6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; MIPS32R6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; MIPS32R6: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
+ ; MIPS32R6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LOAD1]], [[C2]]
+ ; MIPS32R6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ ; MIPS32R6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
+ ; MIPS32R6: $v0 = COPY [[UV]](s32)
+ ; MIPS32R6: $v1 = COPY [[UV1]](s32)
+ ; MIPS32R6: RetRA implicit $v0, implicit $v1
+ %0:_(p0) = COPY $a0
+ %1:_(s40) = G_LOAD %0(p0) :: (load 5 from %ir.0, align 1)
+ %2:_(s64) = G_ZEXT %1(s40)
+ %3:_(s32), %4:_(s32) = G_UNMERGE_VALUES %2(s64)
+ $v0 = COPY %3(s32)
+ $v1 = COPY %4(s32)
+ RetRA implicit $v0, implicit $v1
+
+...
+---
+name: load5align2
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0
+
+ ; MIPS32-LABEL: name: load5align2
+ ; MIPS32: liveins: $a0
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0, align 2)
+ ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from %ir.0 + 4, align 2)
+ ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
+ ; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LOAD1]], [[C2]]
+ ; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ ; MIPS32: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
+ ; MIPS32: $v0 = COPY [[UV]](s32)
+ ; MIPS32: $v1 = COPY [[UV1]](s32)
+ ; MIPS32: RetRA implicit $v0, implicit $v1
+ ; MIPS32R6-LABEL: name: load5align2
+ ; MIPS32R6: liveins: $a0
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0, align 2)
+ ; MIPS32R6: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from %ir.0 + 4, align 2)
+ ; MIPS32R6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; MIPS32R6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; MIPS32R6: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
+ ; MIPS32R6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LOAD1]], [[C2]]
+ ; MIPS32R6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ ; MIPS32R6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
+ ; MIPS32R6: $v0 = COPY [[UV]](s32)
+ ; MIPS32R6: $v1 = COPY [[UV1]](s32)
+ ; MIPS32R6: RetRA implicit $v0, implicit $v1
+ %0:_(p0) = COPY $a0
+ %1:_(s40) = G_LOAD %0(p0) :: (load 5 from %ir.0, align 2)
+ %2:_(s64) = G_ZEXT %1(s40)
+ %3:_(s32), %4:_(s32) = G_UNMERGE_VALUES %2(s64)
+ $v0 = COPY %3(s32)
+ $v1 = COPY %4(s32)
+ RetRA implicit $v0, implicit $v1
+
+...
+---
+name: load5align4
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0
+
+ ; MIPS32-LABEL: name: load5align4
+ ; MIPS32: liveins: $a0
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0)
+ ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from %ir.0 + 4, align 4)
+ ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
+ ; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LOAD1]], [[C2]]
+ ; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ ; MIPS32: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
+ ; MIPS32: $v0 = COPY [[UV]](s32)
+ ; MIPS32: $v1 = COPY [[UV1]](s32)
+ ; MIPS32: RetRA implicit $v0, implicit $v1
+ ; MIPS32R6-LABEL: name: load5align4
+ ; MIPS32R6: liveins: $a0
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0)
+ ; MIPS32R6: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from %ir.0 + 4, align 4)
+ ; MIPS32R6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; MIPS32R6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; MIPS32R6: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
+ ; MIPS32R6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LOAD1]], [[C2]]
+ ; MIPS32R6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ ; MIPS32R6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
+ ; MIPS32R6: $v0 = COPY [[UV]](s32)
+ ; MIPS32R6: $v1 = COPY [[UV1]](s32)
+ ; MIPS32R6: RetRA implicit $v0, implicit $v1
+ %0:_(p0) = COPY $a0
+ %1:_(s40) = G_LOAD %0(p0) :: (load 5 from %ir.0, align 4)
+ %2:_(s64) = G_ZEXT %1(s40)
+ %3:_(s32), %4:_(s32) = G_UNMERGE_VALUES %2(s64)
+ $v0 = COPY %3(s32)
+ $v1 = COPY %4(s32)
+ RetRA implicit $v0, implicit $v1
+
+...
+---
+name: load5align8
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0
+
+ ; MIPS32-LABEL: name: load5align8
+ ; MIPS32: liveins: $a0
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0, align 8)
+ ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from %ir.0 + 4, align 8)
+ ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
+ ; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LOAD1]], [[C2]]
+ ; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ ; MIPS32: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
+ ; MIPS32: $v0 = COPY [[UV]](s32)
+ ; MIPS32: $v1 = COPY [[UV1]](s32)
+ ; MIPS32: RetRA implicit $v0, implicit $v1
+ ; MIPS32R6-LABEL: name: load5align8
+ ; MIPS32R6: liveins: $a0
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0, align 8)
+ ; MIPS32R6: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from %ir.0 + 4, align 8)
+ ; MIPS32R6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; MIPS32R6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; MIPS32R6: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
+ ; MIPS32R6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LOAD1]], [[C2]]
+ ; MIPS32R6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ ; MIPS32R6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
+ ; MIPS32R6: $v0 = COPY [[UV]](s32)
+ ; MIPS32R6: $v1 = COPY [[UV1]](s32)
+ ; MIPS32R6: RetRA implicit $v0, implicit $v1
+ %0:_(p0) = COPY $a0
+ %1:_(s40) = G_LOAD %0(p0) :: (load 5 from %ir.0, align 8)
+ %2:_(s64) = G_ZEXT %1(s40)
+ %3:_(s32), %4:_(s32) = G_UNMERGE_VALUES %2(s64)
+ $v0 = COPY %3(s32)
+ $v1 = COPY %4(s32)
+ RetRA implicit $v0, implicit $v1
+
+...
+---
+name: load6align1
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0
+
+ ; MIPS32-LABEL: name: load6align1
+ ; MIPS32: liveins: $a0
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0, align 1)
+ ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 4 from %ir.0 + 4, align 1)
+ ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
+ ; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LOAD1]], [[C2]]
+ ; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ ; MIPS32: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
+ ; MIPS32: $v0 = COPY [[UV]](s32)
+ ; MIPS32: $v1 = COPY [[UV1]](s32)
+ ; MIPS32: RetRA implicit $v0, implicit $v1
+ ; MIPS32R6-LABEL: name: load6align1
+ ; MIPS32R6: liveins: $a0
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0, align 1)
+ ; MIPS32R6: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from %ir.0 + 4, align 1)
+ ; MIPS32R6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; MIPS32R6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; MIPS32R6: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
+ ; MIPS32R6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LOAD1]], [[C2]]
+ ; MIPS32R6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ ; MIPS32R6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
+ ; MIPS32R6: $v0 = COPY [[UV]](s32)
+ ; MIPS32R6: $v1 = COPY [[UV1]](s32)
+ ; MIPS32R6: RetRA implicit $v0, implicit $v1
+ %0:_(p0) = COPY $a0
+ %1:_(s48) = G_LOAD %0(p0) :: (load 6 from %ir.0, align 1)
+ %2:_(s64) = G_ZEXT %1(s48)
+ %3:_(s32), %4:_(s32) = G_UNMERGE_VALUES %2(s64)
+ $v0 = COPY %3(s32)
+ $v1 = COPY %4(s32)
+ RetRA implicit $v0, implicit $v1
+
+...
+---
+name: load6align2
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0
+
+ ; MIPS32-LABEL: name: load6align2
+ ; MIPS32: liveins: $a0
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0, align 2)
+ ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from %ir.0 + 4)
+ ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
+ ; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LOAD1]], [[C2]]
+ ; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ ; MIPS32: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
+ ; MIPS32: $v0 = COPY [[UV]](s32)
+ ; MIPS32: $v1 = COPY [[UV1]](s32)
+ ; MIPS32: RetRA implicit $v0, implicit $v1
+ ; MIPS32R6-LABEL: name: load6align2
+ ; MIPS32R6: liveins: $a0
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0, align 2)
+ ; MIPS32R6: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from %ir.0 + 4)
+ ; MIPS32R6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; MIPS32R6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; MIPS32R6: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
+ ; MIPS32R6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LOAD1]], [[C2]]
+ ; MIPS32R6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ ; MIPS32R6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
+ ; MIPS32R6: $v0 = COPY [[UV]](s32)
+ ; MIPS32R6: $v1 = COPY [[UV1]](s32)
+ ; MIPS32R6: RetRA implicit $v0, implicit $v1
+ %0:_(p0) = COPY $a0
+ %1:_(s48) = G_LOAD %0(p0) :: (load 6 from %ir.0, align 2)
+ %2:_(s64) = G_ZEXT %1(s48)
+ %3:_(s32), %4:_(s32) = G_UNMERGE_VALUES %2(s64)
+ $v0 = COPY %3(s32)
+ $v1 = COPY %4(s32)
+ RetRA implicit $v0, implicit $v1
+
+...
+---
+name: load6align4
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0
+
+ ; MIPS32-LABEL: name: load6align4
+ ; MIPS32: liveins: $a0
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0)
+ ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from %ir.0 + 4, align 4)
+ ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
+ ; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LOAD1]], [[C2]]
+ ; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ ; MIPS32: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
+ ; MIPS32: $v0 = COPY [[UV]](s32)
+ ; MIPS32: $v1 = COPY [[UV1]](s32)
+ ; MIPS32: RetRA implicit $v0, implicit $v1
+ ; MIPS32R6-LABEL: name: load6align4
+ ; MIPS32R6: liveins: $a0
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0)
+ ; MIPS32R6: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from %ir.0 + 4, align 4)
+ ; MIPS32R6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; MIPS32R6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; MIPS32R6: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
+ ; MIPS32R6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LOAD1]], [[C2]]
+ ; MIPS32R6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ ; MIPS32R6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
+ ; MIPS32R6: $v0 = COPY [[UV]](s32)
+ ; MIPS32R6: $v1 = COPY [[UV1]](s32)
+ ; MIPS32R6: RetRA implicit $v0, implicit $v1
+ %0:_(p0) = COPY $a0
+ %1:_(s48) = G_LOAD %0(p0) :: (load 6 from %ir.0, align 4)
+ %2:_(s64) = G_ZEXT %1(s48)
+ %3:_(s32), %4:_(s32) = G_UNMERGE_VALUES %2(s64)
+ $v0 = COPY %3(s32)
+ $v1 = COPY %4(s32)
+ RetRA implicit $v0, implicit $v1
+
+...
+---
+name: load6align8
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0
+
+ ; MIPS32-LABEL: name: load6align8
+ ; MIPS32: liveins: $a0
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0, align 8)
+ ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from %ir.0 + 4, align 8)
+ ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
+ ; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LOAD1]], [[C2]]
+ ; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ ; MIPS32: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
+ ; MIPS32: $v0 = COPY [[UV]](s32)
+ ; MIPS32: $v1 = COPY [[UV1]](s32)
+ ; MIPS32: RetRA implicit $v0, implicit $v1
+ ; MIPS32R6-LABEL: name: load6align8
+ ; MIPS32R6: liveins: $a0
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0, align 8)
+ ; MIPS32R6: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from %ir.0 + 4, align 8)
+ ; MIPS32R6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; MIPS32R6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; MIPS32R6: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
+ ; MIPS32R6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LOAD1]], [[C2]]
+ ; MIPS32R6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ ; MIPS32R6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
+ ; MIPS32R6: $v0 = COPY [[UV]](s32)
+ ; MIPS32R6: $v1 = COPY [[UV1]](s32)
+ ; MIPS32R6: RetRA implicit $v0, implicit $v1
+ %0:_(p0) = COPY $a0
+ %1:_(s48) = G_LOAD %0(p0) :: (load 6 from %ir.0, align 8)
+ %2:_(s64) = G_ZEXT %1(s48)
+ %3:_(s32), %4:_(s32) = G_UNMERGE_VALUES %2(s64)
+ $v0 = COPY %3(s32)
+ $v1 = COPY %4(s32)
+ RetRA implicit $v0, implicit $v1
+
+...
+---
+name: load7align1
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0
+
+ ; MIPS32-LABEL: name: load7align1
+ ; MIPS32: liveins: $a0
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0, align 1)
+ ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 4 from %ir.0 + 4, align 1)
+ ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
+ ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
+ ; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LOAD1]], [[C2]]
+ ; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ ; MIPS32: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
+ ; MIPS32: $v0 = COPY [[UV]](s32)
+ ; MIPS32: $v1 = COPY [[UV1]](s32)
+ ; MIPS32: RetRA implicit $v0, implicit $v1
+ ; MIPS32R6-LABEL: name: load7align1
+ ; MIPS32R6: liveins: $a0
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0, align 1)
+ ; MIPS32R6: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 4 from %ir.0 + 4, align 1)
+ ; MIPS32R6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; MIPS32R6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
+ ; MIPS32R6: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
+ ; MIPS32R6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LOAD1]], [[C2]]
+ ; MIPS32R6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ ; MIPS32R6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
+ ; MIPS32R6: $v0 = COPY [[UV]](s32)
+ ; MIPS32R6: $v1 = COPY [[UV1]](s32)
+ ; MIPS32R6: RetRA implicit $v0, implicit $v1
+ %0:_(p0) = COPY $a0
+ %1:_(s56) = G_LOAD %0(p0) :: (load 7 from %ir.0, align 1)
+ %2:_(s64) = G_ZEXT %1(s56)
+ %3:_(s32), %4:_(s32) = G_UNMERGE_VALUES %2(s64)
+ $v0 = COPY %3(s32)
+ $v1 = COPY %4(s32)
+ RetRA implicit $v0, implicit $v1
+
+...
+---
+name: load7align2
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0
+
+ ; MIPS32-LABEL: name: load7align2
+ ; MIPS32: liveins: $a0
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0, align 2)
+ ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 4 from %ir.0 + 4, align 2)
+ ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
+ ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
+ ; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LOAD1]], [[C2]]
+ ; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ ; MIPS32: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
+ ; MIPS32: $v0 = COPY [[UV]](s32)
+ ; MIPS32: $v1 = COPY [[UV1]](s32)
+ ; MIPS32: RetRA implicit $v0, implicit $v1
+ ; MIPS32R6-LABEL: name: load7align2
+ ; MIPS32R6: liveins: $a0
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0, align 2)
+ ; MIPS32R6: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 4 from %ir.0 + 4, align 2)
+ ; MIPS32R6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; MIPS32R6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
+ ; MIPS32R6: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
+ ; MIPS32R6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LOAD1]], [[C2]]
+ ; MIPS32R6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ ; MIPS32R6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
+ ; MIPS32R6: $v0 = COPY [[UV]](s32)
+ ; MIPS32R6: $v1 = COPY [[UV1]](s32)
+ ; MIPS32R6: RetRA implicit $v0, implicit $v1
+ %0:_(p0) = COPY $a0
+ %1:_(s56) = G_LOAD %0(p0) :: (load 7 from %ir.0, align 2)
+ %2:_(s64) = G_ZEXT %1(s56)
+ %3:_(s32), %4:_(s32) = G_UNMERGE_VALUES %2(s64)
+ $v0 = COPY %3(s32)
+ $v1 = COPY %4(s32)
+ RetRA implicit $v0, implicit $v1
+
+...
+---
+name: load7align4
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0
+
+ ; MIPS32-LABEL: name: load7align4
+ ; MIPS32: liveins: $a0
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0)
+ ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 4 from %ir.0 + 4)
+ ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
+ ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
+ ; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LOAD1]], [[C2]]
+ ; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ ; MIPS32: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
+ ; MIPS32: $v0 = COPY [[UV]](s32)
+ ; MIPS32: $v1 = COPY [[UV1]](s32)
+ ; MIPS32: RetRA implicit $v0, implicit $v1
+ ; MIPS32R6-LABEL: name: load7align4
+ ; MIPS32R6: liveins: $a0
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0)
+ ; MIPS32R6: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 4 from %ir.0 + 4)
+ ; MIPS32R6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; MIPS32R6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
+ ; MIPS32R6: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
+ ; MIPS32R6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LOAD1]], [[C2]]
+ ; MIPS32R6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ ; MIPS32R6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
+ ; MIPS32R6: $v0 = COPY [[UV]](s32)
+ ; MIPS32R6: $v1 = COPY [[UV1]](s32)
+ ; MIPS32R6: RetRA implicit $v0, implicit $v1
+ %0:_(p0) = COPY $a0
+ %1:_(s56) = G_LOAD %0(p0) :: (load 7 from %ir.0, align 4)
+ %2:_(s64) = G_ZEXT %1(s56)
+ %3:_(s32), %4:_(s32) = G_UNMERGE_VALUES %2(s64)
+ $v0 = COPY %3(s32)
+ $v1 = COPY %4(s32)
+ RetRA implicit $v0, implicit $v1
+
+...
+---
+name: load7align8
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0
+
+ ; MIPS32-LABEL: name: load7align8
+ ; MIPS32: liveins: $a0
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0, align 8)
+ ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 4 from %ir.0 + 4, align 8)
+ ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
+ ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
+ ; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LOAD1]], [[C2]]
+ ; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ ; MIPS32: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
+ ; MIPS32: $v0 = COPY [[UV]](s32)
+ ; MIPS32: $v1 = COPY [[UV1]](s32)
+ ; MIPS32: RetRA implicit $v0, implicit $v1
+ ; MIPS32R6-LABEL: name: load7align8
+ ; MIPS32R6: liveins: $a0
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.0, align 8)
+ ; MIPS32R6: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 4 from %ir.0 + 4, align 8)
+ ; MIPS32R6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; MIPS32R6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
+ ; MIPS32R6: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C1]]
+ ; MIPS32R6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LOAD1]], [[C2]]
+ ; MIPS32R6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ ; MIPS32R6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
+ ; MIPS32R6: $v0 = COPY [[UV]](s32)
+ ; MIPS32R6: $v1 = COPY [[UV1]](s32)
+ ; MIPS32R6: RetRA implicit $v0, implicit $v1
+ %0:_(p0) = COPY $a0
+ %1:_(s56) = G_LOAD %0(p0) :: (load 7 from %ir.0, align 8)
+ %2:_(s64) = G_ZEXT %1(s56)
+ %3:_(s32), %4:_(s32) = G_UNMERGE_VALUES %2(s64)
+ $v0 = COPY %3(s32)
+ $v1 = COPY %4(s32)
+ RetRA implicit $v0, implicit $v1
+
+...
+---
+name: load_double_align1
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ ; MIPS32-LABEL: name: load_double_align1
+ ; MIPS32: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @double_align1
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[GV]], [[C]](s32)
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[GV]](p0) :: (dereferenceable load 4 from @double_align1, align 1)
+ ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (dereferenceable load 4 from @double_align1 + 4, align 1)
+ ; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
+ ; MIPS32: $d0 = COPY [[MV]](s64)
+ ; MIPS32: RetRA implicit $d0
+ ; MIPS32R6-LABEL: name: load_double_align1
+ ; MIPS32R6: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @double_align1
+ ; MIPS32R6: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[GV]](p0) :: (dereferenceable load 8 from @double_align1, align 1)
+ ; MIPS32R6: $d0 = COPY [[LOAD]](s64)
+ ; MIPS32R6: RetRA implicit $d0
+ %1:_(p0) = G_GLOBAL_VALUE @double_align1
+ %0:_(s64) = G_LOAD %1(p0) :: (dereferenceable load 8 from @double_align1, align 1)
+ $d0 = COPY %0(s64)
+ RetRA implicit $d0
+
+...
+---
+name: load_double_align2
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ ; MIPS32-LABEL: name: load_double_align2
+ ; MIPS32: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @double_align2
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[GV]], [[C]](s32)
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[GV]](p0) :: (dereferenceable load 4 from @double_align2, align 2)
+ ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (dereferenceable load 4 from @double_align2 + 4, align 2)
+ ; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
+ ; MIPS32: $d0 = COPY [[MV]](s64)
+ ; MIPS32: RetRA implicit $d0
+ ; MIPS32R6-LABEL: name: load_double_align2
+ ; MIPS32R6: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @double_align2
+ ; MIPS32R6: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[GV]](p0) :: (dereferenceable load 8 from @double_align2, align 2)
+ ; MIPS32R6: $d0 = COPY [[LOAD]](s64)
+ ; MIPS32R6: RetRA implicit $d0
+ %1:_(p0) = G_GLOBAL_VALUE @double_align2
+ %0:_(s64) = G_LOAD %1(p0) :: (dereferenceable load 8 from @double_align2, align 2)
+ $d0 = COPY %0(s64)
+ RetRA implicit $d0
+
+...
+---
+name: load_double_align4
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ ; MIPS32-LABEL: name: load_double_align4
+ ; MIPS32: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @double_align4
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[GV]], [[C]](s32)
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[GV]](p0) :: (dereferenceable load 4 from @double_align4)
+ ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (dereferenceable load 4 from @double_align4 + 4)
+ ; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
+ ; MIPS32: $d0 = COPY [[MV]](s64)
+ ; MIPS32: RetRA implicit $d0
+ ; MIPS32R6-LABEL: name: load_double_align4
+ ; MIPS32R6: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @double_align4
+ ; MIPS32R6: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[GV]](p0) :: (dereferenceable load 8 from @double_align4, align 4)
+ ; MIPS32R6: $d0 = COPY [[LOAD]](s64)
+ ; MIPS32R6: RetRA implicit $d0
+ %1:_(p0) = G_GLOBAL_VALUE @double_align4
+ %0:_(s64) = G_LOAD %1(p0) :: (dereferenceable load 8 from @double_align4, align 4)
+ $d0 = COPY %0(s64)
+ RetRA implicit $d0
+
+...
+---
+name: load_double_align8
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ ; MIPS32-LABEL: name: load_double_align8
+ ; MIPS32: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @double_align8
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[GV]](p0) :: (dereferenceable load 8 from @double_align8)
+ ; MIPS32: $d0 = COPY [[LOAD]](s64)
+ ; MIPS32: RetRA implicit $d0
+ ; MIPS32R6-LABEL: name: load_double_align8
+ ; MIPS32R6: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @double_align8
+ ; MIPS32R6: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[GV]](p0) :: (dereferenceable load 8 from @double_align8)
+ ; MIPS32R6: $d0 = COPY [[LOAD]](s64)
+ ; MIPS32R6: RetRA implicit $d0
+ %1:_(p0) = G_GLOBAL_VALUE @double_align8
+ %0:_(s64) = G_LOAD %1(p0) :: (dereferenceable load 8 from @double_align8)
+ $d0 = COPY %0(s64)
+ RetRA implicit $d0
+
+...
+---
+name: load_i64_align1
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ ; MIPS32-LABEL: name: load_i64_align1
+ ; MIPS32: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @i64_align1
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[GV]], [[C]](s32)
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[GV]](p0) :: (dereferenceable load 4 from @i64_align1, align 1)
+ ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (dereferenceable load 4 from @i64_align1 + 4, align 1)
+ ; MIPS32: $v0 = COPY [[LOAD]](s32)
+ ; MIPS32: $v1 = COPY [[LOAD1]](s32)
+ ; MIPS32: RetRA implicit $v0, implicit $v1
+ ; MIPS32R6-LABEL: name: load_i64_align1
+ ; MIPS32R6: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @i64_align1
+ ; MIPS32R6: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[GV]](p0) :: (dereferenceable load 8 from @i64_align1, align 1)
+ ; MIPS32R6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](s64)
+ ; MIPS32R6: $v0 = COPY [[UV]](s32)
+ ; MIPS32R6: $v1 = COPY [[UV1]](s32)
+ ; MIPS32R6: RetRA implicit $v0, implicit $v1
+ %1:_(p0) = G_GLOBAL_VALUE @i64_align1
+ %0:_(s64) = G_LOAD %1(p0) :: (dereferenceable load 8 from @i64_align1, align 1)
+ %2:_(s32), %3:_(s32) = G_UNMERGE_VALUES %0(s64)
+ $v0 = COPY %2(s32)
+ $v1 = COPY %3(s32)
+ RetRA implicit $v0, implicit $v1
+
+...
+---
+name: load_i64_align2
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ ; MIPS32-LABEL: name: load_i64_align2
+ ; MIPS32: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @i64_align2
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[GV]], [[C]](s32)
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[GV]](p0) :: (dereferenceable load 4 from @i64_align2, align 2)
+ ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (dereferenceable load 4 from @i64_align2 + 4, align 2)
+ ; MIPS32: $v0 = COPY [[LOAD]](s32)
+ ; MIPS32: $v1 = COPY [[LOAD1]](s32)
+ ; MIPS32: RetRA implicit $v0, implicit $v1
+ ; MIPS32R6-LABEL: name: load_i64_align2
+ ; MIPS32R6: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @i64_align2
+ ; MIPS32R6: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[GV]](p0) :: (dereferenceable load 8 from @i64_align2, align 2)
+ ; MIPS32R6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](s64)
+ ; MIPS32R6: $v0 = COPY [[UV]](s32)
+ ; MIPS32R6: $v1 = COPY [[UV1]](s32)
+ ; MIPS32R6: RetRA implicit $v0, implicit $v1
+ %1:_(p0) = G_GLOBAL_VALUE @i64_align2
+ %0:_(s64) = G_LOAD %1(p0) :: (dereferenceable load 8 from @i64_align2, align 2)
+ %2:_(s32), %3:_(s32) = G_UNMERGE_VALUES %0(s64)
+ $v0 = COPY %2(s32)
+ $v1 = COPY %3(s32)
+ RetRA implicit $v0, implicit $v1
+
+...
+---
+name: load_i64_align4
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ ; MIPS32-LABEL: name: load_i64_align4
+ ; MIPS32: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @i64_align4
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[GV]], [[C]](s32)
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[GV]](p0) :: (dereferenceable load 4 from @i64_align4)
+ ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (dereferenceable load 4 from @i64_align4 + 4)
+ ; MIPS32: $v0 = COPY [[LOAD]](s32)
+ ; MIPS32: $v1 = COPY [[LOAD1]](s32)
+ ; MIPS32: RetRA implicit $v0, implicit $v1
+ ; MIPS32R6-LABEL: name: load_i64_align4
+ ; MIPS32R6: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @i64_align4
+ ; MIPS32R6: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[GV]](p0) :: (dereferenceable load 8 from @i64_align4, align 4)
+ ; MIPS32R6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](s64)
+ ; MIPS32R6: $v0 = COPY [[UV]](s32)
+ ; MIPS32R6: $v1 = COPY [[UV1]](s32)
+ ; MIPS32R6: RetRA implicit $v0, implicit $v1
+ %1:_(p0) = G_GLOBAL_VALUE @i64_align4
+ %0:_(s64) = G_LOAD %1(p0) :: (dereferenceable load 8 from @i64_align4, align 4)
+ %2:_(s32), %3:_(s32) = G_UNMERGE_VALUES %0(s64)
+ $v0 = COPY %2(s32)
+ $v1 = COPY %3(s32)
+ RetRA implicit $v0, implicit $v1
+
+...
+---
+name: load_i64_align8
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ ; MIPS32-LABEL: name: load_i64_align8
+ ; MIPS32: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @i64_align8
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[GV]](p0) :: (dereferenceable load 8 from @i64_align8)
+ ; MIPS32: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](s64)
+ ; MIPS32: $v0 = COPY [[UV]](s32)
+ ; MIPS32: $v1 = COPY [[UV1]](s32)
+ ; MIPS32: RetRA implicit $v0, implicit $v1
+ ; MIPS32R6-LABEL: name: load_i64_align8
+ ; MIPS32R6: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @i64_align8
+ ; MIPS32R6: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[GV]](p0) :: (dereferenceable load 8 from @i64_align8)
+ ; MIPS32R6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](s64)
+ ; MIPS32R6: $v0 = COPY [[UV]](s32)
+ ; MIPS32R6: $v1 = COPY [[UV1]](s32)
+ ; MIPS32R6: RetRA implicit $v0, implicit $v1
+ %1:_(p0) = G_GLOBAL_VALUE @i64_align8
+ %0:_(s64) = G_LOAD %1(p0) :: (dereferenceable load 8 from @i64_align8)
+ %2:_(s32), %3:_(s32) = G_UNMERGE_VALUES %0(s64)
+ $v0 = COPY %2(s32)
+ $v1 = COPY %3(s32)
+ RetRA implicit $v0, implicit $v1
+
+...
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/store_split_because_of_memsize_or_align.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/store_split_because_of_memsize_or_align.mir
new file mode 100644
index 000000000000..3804eff22fa7
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/store_split_because_of_memsize_or_align.mir
@@ -0,0 +1,1113 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
+# RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=legalizer -mcpu=mips32r6 -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32R6
+--- |
+
+ %struct.MemSize3_Align1 = type { [3 x i8], i8 }
+ %struct.MemSize3_Align2 = type { [3 x i8], i8 }
+ %struct.MemSize3_Align4 = type { [3 x i8], i8 }
+ %struct.MemSize3_Align8 = type { [3 x i8], i8, [4 x i8] }
+ %struct.MemSize5_Align1 = type <{ [5 x i8], i16, i8 }>
+ %struct.MemSize5_Align2 = type <{ [5 x i8], i16, i8 }>
+ %struct.MemSize5_Align4 = type <{ [5 x i8], i16, i8 }>
+ %struct.MemSize5_Align8 = type <{ [5 x i8], i16, i8 }>
+ %struct.MemSize6_Align1 = type { [6 x i8], i16 }
+ %struct.MemSize6_Align2 = type { [6 x i8], i16 }
+ %struct.MemSize6_Align4 = type { [6 x i8], i16 }
+ %struct.MemSize6_Align8 = type { [6 x i8], i16 }
+ %struct.MemSize7_Align1 = type { [7 x i8], i8 }
+ %struct.MemSize7_Align2 = type { [7 x i8], i8 }
+ %struct.MemSize7_Align4 = type { [7 x i8], i8 }
+ %struct.MemSize7_Align8 = type { [7 x i8], i8 }
+
+ @double_align1 = common global double 0.000000e+00, align 1
+ @double_align2 = common global double 0.000000e+00, align 2
+ @double_align4 = common global double 0.000000e+00, align 4
+ @double_align8 = common global double 0.000000e+00, align 8
+ @i64_align1 = common global i64 0, align 1
+ @i64_align2 = common global i64 0, align 2
+ @i64_align4 = common global i64 0, align 4
+ @i64_align8 = common global i64 0, align 8
+
+ define void @store3align1(%struct.MemSize3_Align1* %S, i32 signext %a) {
+ entry:
+ %0 = bitcast %struct.MemSize3_Align1* %S to i24*
+ %1 = trunc i32 %a to i24
+ store i24 %1, i24* %0, align 1
+ ret void
+ }
+
+ define void @store3align2(%struct.MemSize3_Align2* %S, i32 signext %a) {
+ entry:
+ %0 = bitcast %struct.MemSize3_Align2* %S to i24*
+ %1 = trunc i32 %a to i24
+ store i24 %1, i24* %0, align 2
+ ret void
+ }
+
+ define void @store3align4(%struct.MemSize3_Align4* %S, i32 signext %a) {
+ entry:
+ %0 = bitcast %struct.MemSize3_Align4* %S to i24*
+ %1 = trunc i32 %a to i24
+ store i24 %1, i24* %0, align 4
+ ret void
+ }
+
+ define void @store3align8(%struct.MemSize3_Align8* %S, i32 signext %a) {
+ entry:
+ %0 = bitcast %struct.MemSize3_Align8* %S to i24*
+ %1 = trunc i32 %a to i24
+ store i24 %1, i24* %0, align 8
+ ret void
+ }
+
+ define void @store5align1(%struct.MemSize5_Align1* %S, i64 %a) {
+ entry:
+ %0 = bitcast %struct.MemSize5_Align1* %S to i40*
+ %1 = trunc i64 %a to i40
+ store i40 %1, i40* %0, align 1
+ ret void
+ }
+
+ define void @store5align2(%struct.MemSize5_Align2* %S, i64 %a) {
+ entry:
+ %0 = bitcast %struct.MemSize5_Align2* %S to i40*
+ %1 = trunc i64 %a to i40
+ store i40 %1, i40* %0, align 2
+ ret void
+ }
+
+ define void @store5align4(%struct.MemSize5_Align4* %S, i64 %a) {
+ entry:
+ %0 = bitcast %struct.MemSize5_Align4* %S to i40*
+ %1 = trunc i64 %a to i40
+ store i40 %1, i40* %0, align 4
+ ret void
+ }
+
+ define void @store5align8(%struct.MemSize5_Align8* %S, i64 %a) {
+ entry:
+ %0 = bitcast %struct.MemSize5_Align8* %S to i40*
+ %1 = trunc i64 %a to i40
+ store i40 %1, i40* %0, align 8
+ ret void
+ }
+
+ define void @store6align1(%struct.MemSize6_Align1* %S, i64 %a) {
+ entry:
+ %0 = bitcast %struct.MemSize6_Align1* %S to i48*
+ %1 = trunc i64 %a to i48
+ store i48 %1, i48* %0, align 1
+ ret void
+ }
+
+ define void @store6align2(%struct.MemSize6_Align2* %S, i64 %a) {
+ entry:
+ %0 = bitcast %struct.MemSize6_Align2* %S to i48*
+ %1 = trunc i64 %a to i48
+ store i48 %1, i48* %0, align 2
+ ret void
+ }
+
+ define void @store6align4(%struct.MemSize6_Align4* %S, i64 %a) {
+ entry:
+ %0 = bitcast %struct.MemSize6_Align4* %S to i48*
+ %1 = trunc i64 %a to i48
+ store i48 %1, i48* %0, align 4
+ ret void
+ }
+
+ define void @store6align8(%struct.MemSize6_Align8* %S, i64 %a) {
+ entry:
+ %0 = bitcast %struct.MemSize6_Align8* %S to i48*
+ %1 = trunc i64 %a to i48
+ store i48 %1, i48* %0, align 8
+ ret void
+ }
+
+ define void @store7align1(%struct.MemSize7_Align1* %S, i64 %a) {
+ entry:
+ %0 = bitcast %struct.MemSize7_Align1* %S to i56*
+ %1 = trunc i64 %a to i56
+ store i56 %1, i56* %0, align 1
+ ret void
+ }
+
+ define void @store7align2(%struct.MemSize7_Align2* %S, i64 %a) {
+ entry:
+ %0 = bitcast %struct.MemSize7_Align2* %S to i56*
+ %1 = trunc i64 %a to i56
+ store i56 %1, i56* %0, align 2
+ ret void
+ }
+
+ define void @store7align4(%struct.MemSize7_Align4* %S, i64 %a) {
+ entry:
+ %0 = bitcast %struct.MemSize7_Align4* %S to i56*
+ %1 = trunc i64 %a to i56
+ store i56 %1, i56* %0, align 4
+ ret void
+ }
+
+ define void @store7align8(%struct.MemSize7_Align8* %S, i64 %a) {
+ entry:
+ %0 = bitcast %struct.MemSize7_Align8* %S to i56*
+ %1 = trunc i64 %a to i56
+ store i56 %1, i56* %0, align 8
+ ret void
+ }
+
+ define void @store_double_align1(double %a) {
+ entry:
+ store double %a, double* @double_align1, align 1
+ ret void
+ }
+
+ define void @store_double_align2(double %a) {
+ entry:
+ store double %a, double* @double_align2, align 2
+ ret void
+ }
+
+ define void @store_double_align4(double %a) {
+ entry:
+ store double %a, double* @double_align4, align 4
+ ret void
+ }
+
+ define void @store_double_align8(double %a) {
+ entry:
+ store double %a, double* @double_align8, align 8
+ ret void
+ }
+
+ define void @store_i64_align1(i64 %a) {
+ entry:
+ store i64 %a, i64* @i64_align1, align 1
+ ret void
+ }
+
+ define void @store_i64_align2(i64 signext %a) {
+ entry:
+ store i64 %a, i64* @i64_align2, align 2
+ ret void
+ }
+
+ define void @store_i64_align4(i64 %a) {
+ entry:
+ store i64 %a, i64* @i64_align4, align 4
+ ret void
+ }
+
+ define void @store_i64_align8(i64 signext %a) {
+ entry:
+ store i64 %a, i64* @i64_align8, align 8
+ ret void
+ }
+
+...
+---
+name: store3align1
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0, $a1
+
+ ; MIPS32-LABEL: name: store3align1
+ ; MIPS32: liveins: $a0, $a1
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+ ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; MIPS32: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s32)
+ ; MIPS32: G_STORE [[COPY2]](s32), [[COPY]](p0) :: (store 1 into %ir.0)
+ ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; MIPS32: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s32)
+ ; MIPS32: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p0) :: (store 1 into %ir.0 + 1)
+ ; MIPS32: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; MIPS32: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C3]](s32)
+ ; MIPS32: G_STORE [[LSHR1]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 2)
+ ; MIPS32: RetRA
+ ; MIPS32R6-LABEL: name: store3align1
+ ; MIPS32R6: liveins: $a0, $a1
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+ ; MIPS32R6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: G_STORE [[COPY2]](s32), [[COPY]](p0) :: (store 2 into %ir.0, align 1)
+ ; MIPS32R6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; MIPS32R6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C1]](s32)
+ ; MIPS32R6: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 2)
+ ; MIPS32R6: RetRA
+ %0:_(p0) = COPY $a0
+ %1:_(s32) = COPY $a1
+ %2:_(s24) = G_TRUNC %1(s32)
+ G_STORE %2(s24), %0(p0) :: (store 3 into %ir.0, align 1)
+ RetRA
+
+...
+---
+name: store3align2
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0, $a1
+
+ ; MIPS32-LABEL: name: store3align2
+ ; MIPS32: liveins: $a0, $a1
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+ ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: G_STORE [[COPY2]](s32), [[COPY]](p0) :: (store 2 into %ir.0)
+ ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; MIPS32: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C1]](s32)
+ ; MIPS32: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 2, align 2)
+ ; MIPS32: RetRA
+ ; MIPS32R6-LABEL: name: store3align2
+ ; MIPS32R6: liveins: $a0, $a1
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+ ; MIPS32R6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: G_STORE [[COPY2]](s32), [[COPY]](p0) :: (store 2 into %ir.0)
+ ; MIPS32R6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; MIPS32R6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C1]](s32)
+ ; MIPS32R6: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 2, align 2)
+ ; MIPS32R6: RetRA
+ %0:_(p0) = COPY $a0
+ %1:_(s32) = COPY $a1
+ %2:_(s24) = G_TRUNC %1(s32)
+ G_STORE %2(s24), %0(p0) :: (store 3 into %ir.0, align 2)
+ RetRA
+
+...
+---
+name: store3align4
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0, $a1
+
+ ; MIPS32-LABEL: name: store3align4
+ ; MIPS32: liveins: $a0, $a1
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+ ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: G_STORE [[COPY2]](s32), [[COPY]](p0) :: (store 2 into %ir.0, align 4)
+ ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; MIPS32: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C1]](s32)
+ ; MIPS32: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 2, align 4)
+ ; MIPS32: RetRA
+ ; MIPS32R6-LABEL: name: store3align4
+ ; MIPS32R6: liveins: $a0, $a1
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+ ; MIPS32R6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: G_STORE [[COPY2]](s32), [[COPY]](p0) :: (store 2 into %ir.0, align 4)
+ ; MIPS32R6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; MIPS32R6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C1]](s32)
+ ; MIPS32R6: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 2, align 4)
+ ; MIPS32R6: RetRA
+ %0:_(p0) = COPY $a0
+ %1:_(s32) = COPY $a1
+ %2:_(s24) = G_TRUNC %1(s32)
+ G_STORE %2(s24), %0(p0) :: (store 3 into %ir.0, align 4)
+ RetRA
+
+...
+---
+name: store3align8
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0, $a1
+
+ ; MIPS32-LABEL: name: store3align8
+ ; MIPS32: liveins: $a0, $a1
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+ ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: G_STORE [[COPY2]](s32), [[COPY]](p0) :: (store 2 into %ir.0, align 8)
+ ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; MIPS32: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C1]](s32)
+ ; MIPS32: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 2, align 8)
+ ; MIPS32: RetRA
+ ; MIPS32R6-LABEL: name: store3align8
+ ; MIPS32R6: liveins: $a0, $a1
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+ ; MIPS32R6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: G_STORE [[COPY2]](s32), [[COPY]](p0) :: (store 2 into %ir.0, align 8)
+ ; MIPS32R6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; MIPS32R6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C1]](s32)
+ ; MIPS32R6: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 2, align 8)
+ ; MIPS32R6: RetRA
+ %0:_(p0) = COPY $a0
+ %1:_(s32) = COPY $a1
+ %2:_(s24) = G_TRUNC %1(s32)
+ G_STORE %2(s24), %0(p0) :: (store 3 into %ir.0, align 8)
+ RetRA
+
+...
+---
+name: store5align1
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0, $a2, $a3
+
+ ; MIPS32-LABEL: name: store5align1
+ ; MIPS32: liveins: $a0, $a2, $a3
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a2
+ ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a3
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0, align 1)
+ ; MIPS32: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 4)
+ ; MIPS32: RetRA
+ ; MIPS32R6-LABEL: name: store5align1
+ ; MIPS32R6: liveins: $a0, $a2, $a3
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[COPY1:%[0-9]+]]:_(s32) = COPY $a2
+ ; MIPS32R6: [[COPY2:%[0-9]+]]:_(s32) = COPY $a3
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0, align 1)
+ ; MIPS32R6: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 4)
+ ; MIPS32R6: RetRA
+ %0:_(p0) = COPY $a0
+ %2:_(s32) = COPY $a2
+ %3:_(s32) = COPY $a3
+ %1:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+ %4:_(s40) = G_TRUNC %1(s64)
+ G_STORE %4(s40), %0(p0) :: (store 5 into %ir.0, align 1)
+ RetRA
+
+...
+---
+name: store5align2
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0, $a2, $a3
+
+ ; MIPS32-LABEL: name: store5align2
+ ; MIPS32: liveins: $a0, $a2, $a3
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a2
+ ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a3
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0, align 2)
+ ; MIPS32: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 4, align 2)
+ ; MIPS32: RetRA
+ ; MIPS32R6-LABEL: name: store5align2
+ ; MIPS32R6: liveins: $a0, $a2, $a3
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[COPY1:%[0-9]+]]:_(s32) = COPY $a2
+ ; MIPS32R6: [[COPY2:%[0-9]+]]:_(s32) = COPY $a3
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0, align 2)
+ ; MIPS32R6: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 4, align 2)
+ ; MIPS32R6: RetRA
+ %0:_(p0) = COPY $a0
+ %2:_(s32) = COPY $a2
+ %3:_(s32) = COPY $a3
+ %1:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+ %4:_(s40) = G_TRUNC %1(s64)
+ G_STORE %4(s40), %0(p0) :: (store 5 into %ir.0, align 2)
+ RetRA
+
+...
+---
+name: store5align4
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0, $a2, $a3
+
+ ; MIPS32-LABEL: name: store5align4
+ ; MIPS32: liveins: $a0, $a2, $a3
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a2
+ ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a3
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0)
+ ; MIPS32: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 4, align 4)
+ ; MIPS32: RetRA
+ ; MIPS32R6-LABEL: name: store5align4
+ ; MIPS32R6: liveins: $a0, $a2, $a3
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[COPY1:%[0-9]+]]:_(s32) = COPY $a2
+ ; MIPS32R6: [[COPY2:%[0-9]+]]:_(s32) = COPY $a3
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0)
+ ; MIPS32R6: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 4, align 4)
+ ; MIPS32R6: RetRA
+ %0:_(p0) = COPY $a0
+ %2:_(s32) = COPY $a2
+ %3:_(s32) = COPY $a3
+ %1:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+ %4:_(s40) = G_TRUNC %1(s64)
+ G_STORE %4(s40), %0(p0) :: (store 5 into %ir.0, align 4)
+ RetRA
+
+...
+---
+name: store5align8
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0, $a2, $a3
+
+ ; MIPS32-LABEL: name: store5align8
+ ; MIPS32: liveins: $a0, $a2, $a3
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a2
+ ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a3
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0, align 8)
+ ; MIPS32: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 4, align 8)
+ ; MIPS32: RetRA
+ ; MIPS32R6-LABEL: name: store5align8
+ ; MIPS32R6: liveins: $a0, $a2, $a3
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[COPY1:%[0-9]+]]:_(s32) = COPY $a2
+ ; MIPS32R6: [[COPY2:%[0-9]+]]:_(s32) = COPY $a3
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0, align 8)
+ ; MIPS32R6: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 4, align 8)
+ ; MIPS32R6: RetRA
+ %0:_(p0) = COPY $a0
+ %2:_(s32) = COPY $a2
+ %3:_(s32) = COPY $a3
+ %1:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+ %4:_(s40) = G_TRUNC %1(s64)
+ G_STORE %4(s40), %0(p0) :: (store 5 into %ir.0, align 8)
+ RetRA
+
+...
+---
+name: store6align1
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0, $a2, $a3
+
+ ; MIPS32-LABEL: name: store6align1
+ ; MIPS32: liveins: $a0, $a2, $a3
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a2
+ ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a3
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0, align 1)
+ ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; MIPS32: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
+ ; MIPS32: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 4)
+ ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; MIPS32: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s32)
+ ; MIPS32: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p0) :: (store 1 into %ir.0 + 5)
+ ; MIPS32: RetRA
+ ; MIPS32R6-LABEL: name: store6align1
+ ; MIPS32R6: liveins: $a0, $a2, $a3
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[COPY1:%[0-9]+]]:_(s32) = COPY $a2
+ ; MIPS32R6: [[COPY2:%[0-9]+]]:_(s32) = COPY $a3
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0, align 1)
+ ; MIPS32R6: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 2 into %ir.0 + 4, align 1)
+ ; MIPS32R6: RetRA
+ %0:_(p0) = COPY $a0
+ %2:_(s32) = COPY $a2
+ %3:_(s32) = COPY $a3
+ %1:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+ %4:_(s48) = G_TRUNC %1(s64)
+ G_STORE %4(s48), %0(p0) :: (store 6 into %ir.0, align 1)
+ RetRA
+
+...
+---
+name: store6align2
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0, $a2, $a3
+
+ ; MIPS32-LABEL: name: store6align2
+ ; MIPS32: liveins: $a0, $a2, $a3
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a2
+ ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a3
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0, align 2)
+ ; MIPS32: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 2 into %ir.0 + 4)
+ ; MIPS32: RetRA
+ ; MIPS32R6-LABEL: name: store6align2
+ ; MIPS32R6: liveins: $a0, $a2, $a3
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[COPY1:%[0-9]+]]:_(s32) = COPY $a2
+ ; MIPS32R6: [[COPY2:%[0-9]+]]:_(s32) = COPY $a3
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0, align 2)
+ ; MIPS32R6: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 2 into %ir.0 + 4)
+ ; MIPS32R6: RetRA
+ %0:_(p0) = COPY $a0
+ %2:_(s32) = COPY $a2
+ %3:_(s32) = COPY $a3
+ %1:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+ %4:_(s48) = G_TRUNC %1(s64)
+ G_STORE %4(s48), %0(p0) :: (store 6 into %ir.0, align 2)
+ RetRA
+
+...
+---
+name: store6align4
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0, $a2, $a3
+
+ ; MIPS32-LABEL: name: store6align4
+ ; MIPS32: liveins: $a0, $a2, $a3
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a2
+ ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a3
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0)
+ ; MIPS32: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 2 into %ir.0 + 4, align 4)
+ ; MIPS32: RetRA
+ ; MIPS32R6-LABEL: name: store6align4
+ ; MIPS32R6: liveins: $a0, $a2, $a3
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[COPY1:%[0-9]+]]:_(s32) = COPY $a2
+ ; MIPS32R6: [[COPY2:%[0-9]+]]:_(s32) = COPY $a3
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0)
+ ; MIPS32R6: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 2 into %ir.0 + 4, align 4)
+ ; MIPS32R6: RetRA
+ %0:_(p0) = COPY $a0
+ %2:_(s32) = COPY $a2
+ %3:_(s32) = COPY $a3
+ %1:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+ %4:_(s48) = G_TRUNC %1(s64)
+ G_STORE %4(s48), %0(p0) :: (store 6 into %ir.0, align 4)
+ RetRA
+
+...
+---
+name: store6align8
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0, $a2, $a3
+
+ ; MIPS32-LABEL: name: store6align8
+ ; MIPS32: liveins: $a0, $a2, $a3
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a2
+ ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a3
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0, align 8)
+ ; MIPS32: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 2 into %ir.0 + 4, align 8)
+ ; MIPS32: RetRA
+ ; MIPS32R6-LABEL: name: store6align8
+ ; MIPS32R6: liveins: $a0, $a2, $a3
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[COPY1:%[0-9]+]]:_(s32) = COPY $a2
+ ; MIPS32R6: [[COPY2:%[0-9]+]]:_(s32) = COPY $a3
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0, align 8)
+ ; MIPS32R6: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 2 into %ir.0 + 4, align 8)
+ ; MIPS32R6: RetRA
+ %0:_(p0) = COPY $a0
+ %2:_(s32) = COPY $a2
+ %3:_(s32) = COPY $a3
+ %1:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+ %4:_(s48) = G_TRUNC %1(s64)
+ G_STORE %4(s48), %0(p0) :: (store 6 into %ir.0, align 8)
+ RetRA
+
+...
+---
+name: store7align1
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0, $a2, $a3
+
+ ; MIPS32-LABEL: name: store7align1
+ ; MIPS32: liveins: $a0, $a2, $a3
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a2
+ ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a3
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0, align 1)
+ ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; MIPS32: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
+ ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; MIPS32: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C2]](s32)
+ ; MIPS32: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 4)
+ ; MIPS32: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; MIPS32: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C3]](s32)
+ ; MIPS32: G_STORE [[LSHR]](s32), [[PTR_ADD2]](p0) :: (store 1 into %ir.0 + 5)
+ ; MIPS32: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; MIPS32: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C4]](s32)
+ ; MIPS32: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p0) :: (store 1 into %ir.0 + 6)
+ ; MIPS32: RetRA
+ ; MIPS32R6-LABEL: name: store7align1
+ ; MIPS32R6: liveins: $a0, $a2, $a3
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[COPY1:%[0-9]+]]:_(s32) = COPY $a2
+ ; MIPS32R6: [[COPY2:%[0-9]+]]:_(s32) = COPY $a3
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0, align 1)
+ ; MIPS32R6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; MIPS32R6: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
+ ; MIPS32R6: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 2 into %ir.0 + 4, align 1)
+ ; MIPS32R6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; MIPS32R6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s32)
+ ; MIPS32R6: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p0) :: (store 1 into %ir.0 + 6)
+ ; MIPS32R6: RetRA
+ %0:_(p0) = COPY $a0
+ %2:_(s32) = COPY $a2
+ %3:_(s32) = COPY $a3
+ %1:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+ %4:_(s56) = G_TRUNC %1(s64)
+ G_STORE %4(s56), %0(p0) :: (store 7 into %ir.0, align 1)
+ RetRA
+
+...
+---
+name: store7align2
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0, $a2, $a3
+
+ ; MIPS32-LABEL: name: store7align2
+ ; MIPS32: liveins: $a0, $a2, $a3
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a2
+ ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a3
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0, align 2)
+ ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; MIPS32: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
+ ; MIPS32: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 2 into %ir.0 + 4)
+ ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; MIPS32: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s32)
+ ; MIPS32: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p0) :: (store 1 into %ir.0 + 6, align 2)
+ ; MIPS32: RetRA
+ ; MIPS32R6-LABEL: name: store7align2
+ ; MIPS32R6: liveins: $a0, $a2, $a3
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[COPY1:%[0-9]+]]:_(s32) = COPY $a2
+ ; MIPS32R6: [[COPY2:%[0-9]+]]:_(s32) = COPY $a3
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0, align 2)
+ ; MIPS32R6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; MIPS32R6: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
+ ; MIPS32R6: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 2 into %ir.0 + 4)
+ ; MIPS32R6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; MIPS32R6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s32)
+ ; MIPS32R6: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p0) :: (store 1 into %ir.0 + 6, align 2)
+ ; MIPS32R6: RetRA
+ %0:_(p0) = COPY $a0
+ %2:_(s32) = COPY $a2
+ %3:_(s32) = COPY $a3
+ %1:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+ %4:_(s56) = G_TRUNC %1(s64)
+ G_STORE %4(s56), %0(p0) :: (store 7 into %ir.0, align 2)
+ RetRA
+
+...
+---
+name: store7align4
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0, $a2, $a3
+
+ ; MIPS32-LABEL: name: store7align4
+ ; MIPS32: liveins: $a0, $a2, $a3
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a2
+ ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a3
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0)
+ ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; MIPS32: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
+ ; MIPS32: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 2 into %ir.0 + 4, align 4)
+ ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; MIPS32: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s32)
+ ; MIPS32: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p0) :: (store 1 into %ir.0 + 6, align 4)
+ ; MIPS32: RetRA
+ ; MIPS32R6-LABEL: name: store7align4
+ ; MIPS32R6: liveins: $a0, $a2, $a3
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[COPY1:%[0-9]+]]:_(s32) = COPY $a2
+ ; MIPS32R6: [[COPY2:%[0-9]+]]:_(s32) = COPY $a3
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0)
+ ; MIPS32R6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; MIPS32R6: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
+ ; MIPS32R6: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 2 into %ir.0 + 4, align 4)
+ ; MIPS32R6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; MIPS32R6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s32)
+ ; MIPS32R6: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p0) :: (store 1 into %ir.0 + 6, align 4)
+ ; MIPS32R6: RetRA
+ %0:_(p0) = COPY $a0
+ %2:_(s32) = COPY $a2
+ %3:_(s32) = COPY $a3
+ %1:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+ %4:_(s56) = G_TRUNC %1(s64)
+ G_STORE %4(s56), %0(p0) :: (store 7 into %ir.0, align 4)
+ RetRA
+
+...
+---
+name: store7align8
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0, $a2, $a3
+
+ ; MIPS32-LABEL: name: store7align8
+ ; MIPS32: liveins: $a0, $a2, $a3
+ ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a2
+ ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a3
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0, align 8)
+ ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; MIPS32: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
+ ; MIPS32: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 2 into %ir.0 + 4, align 8)
+ ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; MIPS32: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s32)
+ ; MIPS32: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p0) :: (store 1 into %ir.0 + 6, align 8)
+ ; MIPS32: RetRA
+ ; MIPS32R6-LABEL: name: store7align8
+ ; MIPS32R6: liveins: $a0, $a2, $a3
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32R6: [[COPY1:%[0-9]+]]:_(s32) = COPY $a2
+ ; MIPS32R6: [[COPY2:%[0-9]+]]:_(s32) = COPY $a3
+ ; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32R6: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0, align 8)
+ ; MIPS32R6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; MIPS32R6: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
+ ; MIPS32R6: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 2 into %ir.0 + 4, align 8)
+ ; MIPS32R6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; MIPS32R6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s32)
+ ; MIPS32R6: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p0) :: (store 1 into %ir.0 + 6, align 8)
+ ; MIPS32R6: RetRA
+ %0:_(p0) = COPY $a0
+ %2:_(s32) = COPY $a2
+ %3:_(s32) = COPY $a3
+ %1:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+ %4:_(s56) = G_TRUNC %1(s64)
+ G_STORE %4(s56), %0(p0) :: (store 7 into %ir.0, align 8)
+ RetRA
+
+...
+---
+name: store_double_align1
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $d6
+
+ ; MIPS32-LABEL: name: store_double_align1
+ ; MIPS32: liveins: $d6
+ ; MIPS32: [[COPY:%[0-9]+]]:_(s64) = COPY $d6
+ ; MIPS32: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @double_align1
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[GV]], [[C]](s32)
+ ; MIPS32: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; MIPS32: G_STORE [[UV]](s32), [[GV]](p0) :: (store 4 into @double_align1, align 1)
+ ; MIPS32: G_STORE [[UV1]](s32), [[PTR_ADD]](p0) :: (store 4 into @double_align1 + 4, align 1)
+ ; MIPS32: RetRA
+ ; MIPS32R6-LABEL: name: store_double_align1
+ ; MIPS32R6: liveins: $d6
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(s64) = COPY $d6
+ ; MIPS32R6: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @double_align1
+ ; MIPS32R6: G_STORE [[COPY]](s64), [[GV]](p0) :: (store 8 into @double_align1, align 1)
+ ; MIPS32R6: RetRA
+ %0:_(s64) = COPY $d6
+ %1:_(p0) = G_GLOBAL_VALUE @double_align1
+ G_STORE %0(s64), %1(p0) :: (store 8 into @double_align1, align 1)
+ RetRA
+
+...
+---
+name: store_double_align2
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $d6
+
+ ; MIPS32-LABEL: name: store_double_align2
+ ; MIPS32: liveins: $d6
+ ; MIPS32: [[COPY:%[0-9]+]]:_(s64) = COPY $d6
+ ; MIPS32: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @double_align2
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[GV]], [[C]](s32)
+ ; MIPS32: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; MIPS32: G_STORE [[UV]](s32), [[GV]](p0) :: (store 4 into @double_align2, align 2)
+ ; MIPS32: G_STORE [[UV1]](s32), [[PTR_ADD]](p0) :: (store 4 into @double_align2 + 4, align 2)
+ ; MIPS32: RetRA
+ ; MIPS32R6-LABEL: name: store_double_align2
+ ; MIPS32R6: liveins: $d6
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(s64) = COPY $d6
+ ; MIPS32R6: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @double_align2
+ ; MIPS32R6: G_STORE [[COPY]](s64), [[GV]](p0) :: (store 8 into @double_align2, align 2)
+ ; MIPS32R6: RetRA
+ %0:_(s64) = COPY $d6
+ %1:_(p0) = G_GLOBAL_VALUE @double_align2
+ G_STORE %0(s64), %1(p0) :: (store 8 into @double_align2, align 2)
+ RetRA
+
+...
+---
+name: store_double_align4
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $d6
+
+ ; MIPS32-LABEL: name: store_double_align4
+ ; MIPS32: liveins: $d6
+ ; MIPS32: [[COPY:%[0-9]+]]:_(s64) = COPY $d6
+ ; MIPS32: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @double_align4
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[GV]], [[C]](s32)
+ ; MIPS32: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; MIPS32: G_STORE [[UV]](s32), [[GV]](p0) :: (store 4 into @double_align4)
+ ; MIPS32: G_STORE [[UV1]](s32), [[PTR_ADD]](p0) :: (store 4 into @double_align4 + 4)
+ ; MIPS32: RetRA
+ ; MIPS32R6-LABEL: name: store_double_align4
+ ; MIPS32R6: liveins: $d6
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(s64) = COPY $d6
+ ; MIPS32R6: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @double_align4
+ ; MIPS32R6: G_STORE [[COPY]](s64), [[GV]](p0) :: (store 8 into @double_align4, align 4)
+ ; MIPS32R6: RetRA
+ %0:_(s64) = COPY $d6
+ %1:_(p0) = G_GLOBAL_VALUE @double_align4
+ G_STORE %0(s64), %1(p0) :: (store 8 into @double_align4, align 4)
+ RetRA
+
+...
+---
+name: store_double_align8
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $d6
+
+ ; MIPS32-LABEL: name: store_double_align8
+ ; MIPS32: liveins: $d6
+ ; MIPS32: [[COPY:%[0-9]+]]:_(s64) = COPY $d6
+ ; MIPS32: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @double_align8
+ ; MIPS32: G_STORE [[COPY]](s64), [[GV]](p0) :: (store 8 into @double_align8)
+ ; MIPS32: RetRA
+ ; MIPS32R6-LABEL: name: store_double_align8
+ ; MIPS32R6: liveins: $d6
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(s64) = COPY $d6
+ ; MIPS32R6: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @double_align8
+ ; MIPS32R6: G_STORE [[COPY]](s64), [[GV]](p0) :: (store 8 into @double_align8)
+ ; MIPS32R6: RetRA
+ %0:_(s64) = COPY $d6
+ %1:_(p0) = G_GLOBAL_VALUE @double_align8
+ G_STORE %0(s64), %1(p0) :: (store 8 into @double_align8)
+ RetRA
+
+...
+---
+name: store_i64_align1
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0, $a1
+
+ ; MIPS32-LABEL: name: store_i64_align1
+ ; MIPS32: liveins: $a0, $a1
+ ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+ ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+ ; MIPS32: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @i64_align1
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[GV]], [[C]](s32)
+ ; MIPS32: G_STORE [[COPY]](s32), [[GV]](p0) :: (store 4 into @i64_align1, align 1)
+ ; MIPS32: G_STORE [[COPY1]](s32), [[PTR_ADD]](p0) :: (store 4 into @i64_align1 + 4, align 1)
+ ; MIPS32: RetRA
+ ; MIPS32R6-LABEL: name: store_i64_align1
+ ; MIPS32R6: liveins: $a0, $a1
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+ ; MIPS32R6: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+ ; MIPS32R6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+ ; MIPS32R6: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @i64_align1
+ ; MIPS32R6: G_STORE [[MV]](s64), [[GV]](p0) :: (store 8 into @i64_align1, align 1)
+ ; MIPS32R6: RetRA
+ %1:_(s32) = COPY $a0
+ %2:_(s32) = COPY $a1
+ %0:_(s64) = G_MERGE_VALUES %1(s32), %2(s32)
+ %3:_(p0) = G_GLOBAL_VALUE @i64_align1
+ G_STORE %0(s64), %3(p0) :: (store 8 into @i64_align1, align 1)
+ RetRA
+
+...
+---
+name: store_i64_align2
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0, $a1
+
+ ; MIPS32-LABEL: name: store_i64_align2
+ ; MIPS32: liveins: $a0, $a1
+ ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+ ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+ ; MIPS32: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @i64_align2
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[GV]], [[C]](s32)
+ ; MIPS32: G_STORE [[COPY]](s32), [[GV]](p0) :: (store 4 into @i64_align2, align 2)
+ ; MIPS32: G_STORE [[COPY1]](s32), [[PTR_ADD]](p0) :: (store 4 into @i64_align2 + 4, align 2)
+ ; MIPS32: RetRA
+ ; MIPS32R6-LABEL: name: store_i64_align2
+ ; MIPS32R6: liveins: $a0, $a1
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+ ; MIPS32R6: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+ ; MIPS32R6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+ ; MIPS32R6: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @i64_align2
+ ; MIPS32R6: G_STORE [[MV]](s64), [[GV]](p0) :: (store 8 into @i64_align2, align 2)
+ ; MIPS32R6: RetRA
+ %1:_(s32) = COPY $a0
+ %2:_(s32) = COPY $a1
+ %0:_(s64) = G_MERGE_VALUES %1(s32), %2(s32)
+ %3:_(p0) = G_GLOBAL_VALUE @i64_align2
+ G_STORE %0(s64), %3(p0) :: (store 8 into @i64_align2, align 2)
+ RetRA
+
+...
+---
+name: store_i64_align4
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0, $a1
+
+ ; MIPS32-LABEL: name: store_i64_align4
+ ; MIPS32: liveins: $a0, $a1
+ ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+ ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+ ; MIPS32: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @i64_align4
+ ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[GV]], [[C]](s32)
+ ; MIPS32: G_STORE [[COPY]](s32), [[GV]](p0) :: (store 4 into @i64_align4)
+ ; MIPS32: G_STORE [[COPY1]](s32), [[PTR_ADD]](p0) :: (store 4 into @i64_align4 + 4)
+ ; MIPS32: RetRA
+ ; MIPS32R6-LABEL: name: store_i64_align4
+ ; MIPS32R6: liveins: $a0, $a1
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+ ; MIPS32R6: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+ ; MIPS32R6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+ ; MIPS32R6: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @i64_align4
+ ; MIPS32R6: G_STORE [[MV]](s64), [[GV]](p0) :: (store 8 into @i64_align4, align 4)
+ ; MIPS32R6: RetRA
+ %1:_(s32) = COPY $a0
+ %2:_(s32) = COPY $a1
+ %0:_(s64) = G_MERGE_VALUES %1(s32), %2(s32)
+ %3:_(p0) = G_GLOBAL_VALUE @i64_align4
+ G_STORE %0(s64), %3(p0) :: (store 8 into @i64_align4, align 4)
+ RetRA
+
+...
+---
+name: store_i64_align8
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $a0, $a1
+
+ ; MIPS32-LABEL: name: store_i64_align8
+ ; MIPS32: liveins: $a0, $a1
+ ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+ ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+ ; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+ ; MIPS32: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @i64_align8
+ ; MIPS32: G_STORE [[MV]](s64), [[GV]](p0) :: (store 8 into @i64_align8)
+ ; MIPS32: RetRA
+ ; MIPS32R6-LABEL: name: store_i64_align8
+ ; MIPS32R6: liveins: $a0, $a1
+ ; MIPS32R6: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+ ; MIPS32R6: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+ ; MIPS32R6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+ ; MIPS32R6: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @i64_align8
+ ; MIPS32R6: G_STORE [[MV]](s64), [[GV]](p0) :: (store 8 into @i64_align8)
+ ; MIPS32R6: RetRA
+ %1:_(s32) = COPY $a0
+ %2:_(s32) = COPY $a1
+ %0:_(s64) = G_MERGE_VALUES %1(s32), %2(s32)
+ %3:_(p0) = G_GLOBAL_VALUE @i64_align8
+ G_STORE %0(s64), %3(p0) :: (store 8 into @i64_align8)
+ RetRA
+
+...
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_split_because_of_memsize_or_align.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_split_because_of_memsize_or_align.ll
new file mode 100644
index 000000000000..c7a70d56f8a0
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_split_because_of_memsize_or_align.ll
@@ -0,0 +1,692 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
+; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r6 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32R6
+
+%struct.MemSize3_Align1 = type { [3 x i8], i8 }
+%struct.MemSize3_Align2 = type { [3 x i8], i8 }
+%struct.MemSize3_Align4 = type { [3 x i8], i8 }
+%struct.MemSize3_Align8 = type { [3 x i8], i8, [4 x i8] }
+%struct.MemSize5_Align1 = type <{ [5 x i8], i16, i8 }>
+%struct.MemSize5_Align2 = type <{ [5 x i8], i16, i8 }>
+%struct.MemSize5_Align4 = type <{ [5 x i8], i16, i8 }>
+%struct.MemSize5_Align8 = type <{ [5 x i8], i16, i8 }>
+%struct.MemSize6_Align1 = type { [6 x i8], i16 }
+%struct.MemSize6_Align2 = type { [6 x i8], i16 }
+%struct.MemSize6_Align4 = type { [6 x i8], i16 }
+%struct.MemSize6_Align8 = type { [6 x i8], i16 }
+%struct.MemSize7_Align1 = type { [7 x i8], i8 }
+%struct.MemSize7_Align2 = type { [7 x i8], i8 }
+%struct.MemSize7_Align4 = type { [7 x i8], i8 }
+%struct.MemSize7_Align8 = type { [7 x i8], i8 }
+
+@double_align1 = common global double 0.000000e+00, align 1
+@double_align2 = common global double 0.000000e+00, align 2
+@double_align4 = common global double 0.000000e+00, align 4
+@double_align8 = common global double 0.000000e+00, align 8
+@i64_align1 = common global i64 0, align 1
+@i64_align2 = common global i64 0, align 2
+@i64_align4 = common global i64 0, align 4
+@i64_align8 = common global i64 0, align 8
+
+define i32 @load3align1(%struct.MemSize3_Align1* %S) {
+; MIPS32-LABEL: load3align1:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: # implicit-def: $at
+; MIPS32-NEXT: lwl $1, 3($4)
+; MIPS32-NEXT: lwr $1, 0($4)
+; MIPS32-NEXT: lui $2, 255
+; MIPS32-NEXT: ori $2, $2, 65535
+; MIPS32-NEXT: and $2, $1, $2
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: load3align1:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lw $1, 0($4)
+; MIPS32R6-NEXT: lui $2, 255
+; MIPS32R6-NEXT: ori $2, $2, 65535
+; MIPS32R6-NEXT: and $2, $1, $2
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize3_Align1* %S to i24*
+ %bf.load = load i24, i24* %0, align 1
+ %bf.cast = zext i24 %bf.load to i32
+ ret i32 %bf.cast
+}
+
+define i32 @load3align2(%struct.MemSize3_Align2* %S) {
+; MIPS32-LABEL: load3align2:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: # implicit-def: $at
+; MIPS32-NEXT: lwl $1, 3($4)
+; MIPS32-NEXT: lwr $1, 0($4)
+; MIPS32-NEXT: lui $2, 255
+; MIPS32-NEXT: ori $2, $2, 65535
+; MIPS32-NEXT: and $2, $1, $2
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: load3align2:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lw $1, 0($4)
+; MIPS32R6-NEXT: lui $2, 255
+; MIPS32R6-NEXT: ori $2, $2, 65535
+; MIPS32R6-NEXT: and $2, $1, $2
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize3_Align2* %S to i24*
+ %bf.load = load i24, i24* %0, align 2
+ %bf.cast = zext i24 %bf.load to i32
+ ret i32 %bf.cast
+}
+
+define i32 @load3align4(%struct.MemSize3_Align4* %S, i32 signext %a) {
+; MIPS32-LABEL: load3align4:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lw $1, 0($4)
+; MIPS32-NEXT: lui $2, 255
+; MIPS32-NEXT: ori $2, $2, 65535
+; MIPS32-NEXT: and $2, $1, $2
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: load3align4:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lw $1, 0($4)
+; MIPS32R6-NEXT: lui $2, 255
+; MIPS32R6-NEXT: ori $2, $2, 65535
+; MIPS32R6-NEXT: and $2, $1, $2
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize3_Align4* %S to i24*
+ %bf.load = load i24, i24* %0, align 4
+ %bf.cast = zext i24 %bf.load to i32
+ ret i32 %bf.cast
+}
+
+define i32 @load3align8(%struct.MemSize3_Align8* %S, i32 signext %a) {
+; MIPS32-LABEL: load3align8:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lw $1, 0($4)
+; MIPS32-NEXT: lui $2, 255
+; MIPS32-NEXT: ori $2, $2, 65535
+; MIPS32-NEXT: and $2, $1, $2
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: load3align8:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lw $1, 0($4)
+; MIPS32R6-NEXT: lui $2, 255
+; MIPS32R6-NEXT: ori $2, $2, 65535
+; MIPS32R6-NEXT: and $2, $1, $2
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize3_Align8* %S to i24*
+ %bf.load = load i24, i24* %0, align 8
+ %bf.cast = zext i24 %bf.load to i32
+ ret i32 %bf.cast
+}
+
+define i64 @load5align1(%struct.MemSize5_Align1* %S) {
+; MIPS32-LABEL: load5align1:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: # implicit-def: $at
+; MIPS32-NEXT: lwl $1, 3($4)
+; MIPS32-NEXT: lwr $1, 0($4)
+; MIPS32-NEXT: lbu $2, 4($4)
+; MIPS32-NEXT: addiu $3, $zero, 65535
+; MIPS32-NEXT: and $1, $1, $3
+; MIPS32-NEXT: andi $3, $2, 255
+; MIPS32-NEXT: move $2, $1
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: load5align1:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lw $1, 0($4)
+; MIPS32R6-NEXT: lbu $2, 4($4)
+; MIPS32R6-NEXT: addiu $3, $zero, 65535
+; MIPS32R6-NEXT: and $1, $1, $3
+; MIPS32R6-NEXT: andi $3, $2, 255
+; MIPS32R6-NEXT: move $2, $1
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize5_Align1* %S to i40*
+ %bf.load = load i40, i40* %0, align 1
+ %bf.cast = zext i40 %bf.load to i64
+ ret i64 %bf.cast
+}
+
+define i64 @load5align2(%struct.MemSize5_Align2* %S) {
+; MIPS32-LABEL: load5align2:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: # implicit-def: $at
+; MIPS32-NEXT: lwl $1, 3($4)
+; MIPS32-NEXT: lwr $1, 0($4)
+; MIPS32-NEXT: lbu $2, 4($4)
+; MIPS32-NEXT: addiu $3, $zero, 65535
+; MIPS32-NEXT: and $1, $1, $3
+; MIPS32-NEXT: andi $3, $2, 255
+; MIPS32-NEXT: move $2, $1
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: load5align2:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lw $1, 0($4)
+; MIPS32R6-NEXT: lbu $2, 4($4)
+; MIPS32R6-NEXT: addiu $3, $zero, 65535
+; MIPS32R6-NEXT: and $1, $1, $3
+; MIPS32R6-NEXT: andi $3, $2, 255
+; MIPS32R6-NEXT: move $2, $1
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize5_Align2* %S to i40*
+ %bf.load = load i40, i40* %0, align 2
+ %bf.cast = zext i40 %bf.load to i64
+ ret i64 %bf.cast
+}
+
+define i64 @load5align4(%struct.MemSize5_Align4* %S) {
+; MIPS32-LABEL: load5align4:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lw $1, 0($4)
+; MIPS32-NEXT: lbu $2, 4($4)
+; MIPS32-NEXT: addiu $3, $zero, 65535
+; MIPS32-NEXT: and $1, $1, $3
+; MIPS32-NEXT: andi $3, $2, 255
+; MIPS32-NEXT: move $2, $1
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: load5align4:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lw $1, 0($4)
+; MIPS32R6-NEXT: lbu $2, 4($4)
+; MIPS32R6-NEXT: addiu $3, $zero, 65535
+; MIPS32R6-NEXT: and $1, $1, $3
+; MIPS32R6-NEXT: andi $3, $2, 255
+; MIPS32R6-NEXT: move $2, $1
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize5_Align4* %S to i40*
+ %bf.load = load i40, i40* %0, align 4
+ %bf.cast = zext i40 %bf.load to i64
+ ret i64 %bf.cast
+}
+
+define i64 @load5align8(%struct.MemSize5_Align8* %S) {
+; MIPS32-LABEL: load5align8:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lw $1, 0($4)
+; MIPS32-NEXT: lbu $2, 4($4)
+; MIPS32-NEXT: addiu $3, $zero, 65535
+; MIPS32-NEXT: and $1, $1, $3
+; MIPS32-NEXT: andi $3, $2, 255
+; MIPS32-NEXT: move $2, $1
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: load5align8:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lw $1, 0($4)
+; MIPS32R6-NEXT: lbu $2, 4($4)
+; MIPS32R6-NEXT: addiu $3, $zero, 65535
+; MIPS32R6-NEXT: and $1, $1, $3
+; MIPS32R6-NEXT: andi $3, $2, 255
+; MIPS32R6-NEXT: move $2, $1
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize5_Align8* %S to i40*
+ %bf.load = load i40, i40* %0, align 8
+ %bf.cast = zext i40 %bf.load to i64
+ ret i64 %bf.cast
+}
+
+define i64 @load6align1(%struct.MemSize6_Align1* %S) {
+; MIPS32-LABEL: load6align1:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: # implicit-def: $at
+; MIPS32-NEXT: lwl $1, 3($4)
+; MIPS32-NEXT: lwr $1, 0($4)
+; MIPS32-NEXT: # implicit-def: $v0
+; MIPS32-NEXT: lwl $2, 7($4)
+; MIPS32-NEXT: lwr $2, 4($4)
+; MIPS32-NEXT: addiu $3, $zero, 65535
+; MIPS32-NEXT: and $1, $1, $3
+; MIPS32-NEXT: andi $3, $2, 65535
+; MIPS32-NEXT: move $2, $1
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: load6align1:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lw $1, 0($4)
+; MIPS32R6-NEXT: lhu $2, 4($4)
+; MIPS32R6-NEXT: addiu $3, $zero, 65535
+; MIPS32R6-NEXT: and $1, $1, $3
+; MIPS32R6-NEXT: andi $3, $2, 65535
+; MIPS32R6-NEXT: move $2, $1
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize6_Align1* %S to i48*
+ %bf.load = load i48, i48* %0, align 1
+ %bf.cast = zext i48 %bf.load to i64
+ ret i64 %bf.cast
+}
+
+define i64 @load6align2(%struct.MemSize6_Align2* %S) {
+; MIPS32-LABEL: load6align2:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: # implicit-def: $at
+; MIPS32-NEXT: lwl $1, 3($4)
+; MIPS32-NEXT: lwr $1, 0($4)
+; MIPS32-NEXT: lhu $2, 4($4)
+; MIPS32-NEXT: addiu $3, $zero, 65535
+; MIPS32-NEXT: and $1, $1, $3
+; MIPS32-NEXT: andi $3, $2, 65535
+; MIPS32-NEXT: move $2, $1
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: load6align2:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lw $1, 0($4)
+; MIPS32R6-NEXT: lhu $2, 4($4)
+; MIPS32R6-NEXT: addiu $3, $zero, 65535
+; MIPS32R6-NEXT: and $1, $1, $3
+; MIPS32R6-NEXT: andi $3, $2, 65535
+; MIPS32R6-NEXT: move $2, $1
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize6_Align2* %S to i48*
+ %bf.load = load i48, i48* %0, align 2
+ %bf.cast = zext i48 %bf.load to i64
+ ret i64 %bf.cast
+}
+
+define i64 @load6align4(%struct.MemSize6_Align4* %S) {
+; MIPS32-LABEL: load6align4:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lw $1, 0($4)
+; MIPS32-NEXT: lhu $2, 4($4)
+; MIPS32-NEXT: addiu $3, $zero, 65535
+; MIPS32-NEXT: and $1, $1, $3
+; MIPS32-NEXT: andi $3, $2, 65535
+; MIPS32-NEXT: move $2, $1
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: load6align4:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lw $1, 0($4)
+; MIPS32R6-NEXT: lhu $2, 4($4)
+; MIPS32R6-NEXT: addiu $3, $zero, 65535
+; MIPS32R6-NEXT: and $1, $1, $3
+; MIPS32R6-NEXT: andi $3, $2, 65535
+; MIPS32R6-NEXT: move $2, $1
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize6_Align4* %S to i48*
+ %bf.load = load i48, i48* %0, align 4
+ %bf.cast = zext i48 %bf.load to i64
+ ret i64 %bf.cast
+}
+
+define i64 @load6align8(%struct.MemSize6_Align8* %S) {
+; MIPS32-LABEL: load6align8:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lw $1, 0($4)
+; MIPS32-NEXT: lhu $2, 4($4)
+; MIPS32-NEXT: addiu $3, $zero, 65535
+; MIPS32-NEXT: and $1, $1, $3
+; MIPS32-NEXT: andi $3, $2, 65535
+; MIPS32-NEXT: move $2, $1
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: load6align8:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lw $1, 0($4)
+; MIPS32R6-NEXT: lhu $2, 4($4)
+; MIPS32R6-NEXT: addiu $3, $zero, 65535
+; MIPS32R6-NEXT: and $1, $1, $3
+; MIPS32R6-NEXT: andi $3, $2, 65535
+; MIPS32R6-NEXT: move $2, $1
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize6_Align8* %S to i48*
+ %bf.load = load i48, i48* %0, align 8
+ %bf.cast = zext i48 %bf.load to i64
+ ret i64 %bf.cast
+}
+
+define i64 @load7align1(%struct.MemSize7_Align1* %S) {
+; MIPS32-LABEL: load7align1:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: # implicit-def: $at
+; MIPS32-NEXT: lwl $1, 3($4)
+; MIPS32-NEXT: lwr $1, 0($4)
+; MIPS32-NEXT: # implicit-def: $v0
+; MIPS32-NEXT: lwl $2, 7($4)
+; MIPS32-NEXT: lwr $2, 4($4)
+; MIPS32-NEXT: addiu $3, $zero, 65535
+; MIPS32-NEXT: lui $4, 255
+; MIPS32-NEXT: ori $4, $4, 65535
+; MIPS32-NEXT: and $1, $1, $3
+; MIPS32-NEXT: and $3, $2, $4
+; MIPS32-NEXT: move $2, $1
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: load7align1:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lw $1, 0($4)
+; MIPS32R6-NEXT: lw $2, 4($4)
+; MIPS32R6-NEXT: addiu $3, $zero, 65535
+; MIPS32R6-NEXT: lui $4, 255
+; MIPS32R6-NEXT: ori $4, $4, 65535
+; MIPS32R6-NEXT: and $1, $1, $3
+; MIPS32R6-NEXT: and $3, $2, $4
+; MIPS32R6-NEXT: move $2, $1
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize7_Align1* %S to i56*
+ %bf.load = load i56, i56* %0, align 1
+ %bf.cast = zext i56 %bf.load to i64
+ ret i64 %bf.cast
+}
+
+define i64 @load7align2(%struct.MemSize7_Align2* %S) {
+; MIPS32-LABEL: load7align2:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: # implicit-def: $at
+; MIPS32-NEXT: lwl $1, 3($4)
+; MIPS32-NEXT: lwr $1, 0($4)
+; MIPS32-NEXT: # implicit-def: $v0
+; MIPS32-NEXT: lwl $2, 7($4)
+; MIPS32-NEXT: lwr $2, 4($4)
+; MIPS32-NEXT: addiu $3, $zero, 65535
+; MIPS32-NEXT: lui $4, 255
+; MIPS32-NEXT: ori $4, $4, 65535
+; MIPS32-NEXT: and $1, $1, $3
+; MIPS32-NEXT: and $3, $2, $4
+; MIPS32-NEXT: move $2, $1
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: load7align2:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lw $1, 0($4)
+; MIPS32R6-NEXT: lw $2, 4($4)
+; MIPS32R6-NEXT: addiu $3, $zero, 65535
+; MIPS32R6-NEXT: lui $4, 255
+; MIPS32R6-NEXT: ori $4, $4, 65535
+; MIPS32R6-NEXT: and $1, $1, $3
+; MIPS32R6-NEXT: and $3, $2, $4
+; MIPS32R6-NEXT: move $2, $1
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize7_Align2* %S to i56*
+ %bf.load = load i56, i56* %0, align 2
+ %bf.cast = zext i56 %bf.load to i64
+ ret i64 %bf.cast
+}
+
+define i64 @load7align4(%struct.MemSize7_Align4* %S) {
+; MIPS32-LABEL: load7align4:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lw $1, 0($4)
+; MIPS32-NEXT: lw $2, 4($4)
+; MIPS32-NEXT: addiu $3, $zero, 65535
+; MIPS32-NEXT: lui $4, 255
+; MIPS32-NEXT: ori $4, $4, 65535
+; MIPS32-NEXT: and $1, $1, $3
+; MIPS32-NEXT: and $3, $2, $4
+; MIPS32-NEXT: move $2, $1
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: load7align4:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lw $1, 0($4)
+; MIPS32R6-NEXT: lw $2, 4($4)
+; MIPS32R6-NEXT: addiu $3, $zero, 65535
+; MIPS32R6-NEXT: lui $4, 255
+; MIPS32R6-NEXT: ori $4, $4, 65535
+; MIPS32R6-NEXT: and $1, $1, $3
+; MIPS32R6-NEXT: and $3, $2, $4
+; MIPS32R6-NEXT: move $2, $1
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize7_Align4* %S to i56*
+ %bf.load = load i56, i56* %0, align 4
+ %bf.cast = zext i56 %bf.load to i64
+ ret i64 %bf.cast
+}
+
+define i64 @load7align8(%struct.MemSize7_Align8* %S) {
+; MIPS32-LABEL: load7align8:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lw $1, 0($4)
+; MIPS32-NEXT: lw $2, 4($4)
+; MIPS32-NEXT: addiu $3, $zero, 65535
+; MIPS32-NEXT: lui $4, 255
+; MIPS32-NEXT: ori $4, $4, 65535
+; MIPS32-NEXT: and $1, $1, $3
+; MIPS32-NEXT: and $3, $2, $4
+; MIPS32-NEXT: move $2, $1
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: load7align8:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lw $1, 0($4)
+; MIPS32R6-NEXT: lw $2, 4($4)
+; MIPS32R6-NEXT: addiu $3, $zero, 65535
+; MIPS32R6-NEXT: lui $4, 255
+; MIPS32R6-NEXT: ori $4, $4, 65535
+; MIPS32R6-NEXT: and $1, $1, $3
+; MIPS32R6-NEXT: and $3, $2, $4
+; MIPS32R6-NEXT: move $2, $1
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize7_Align8* %S to i56*
+ %bf.load = load i56, i56* %0, align 8
+ %bf.cast = zext i56 %bf.load to i64
+ ret i64 %bf.cast
+}
+
+define double @load_double_align1() {
+; MIPS32-LABEL: load_double_align1:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lui $1, %hi(double_align1)
+; MIPS32-NEXT: addiu $1, $1, %lo(double_align1)
+; MIPS32-NEXT: # implicit-def: $v0
+; MIPS32-NEXT: lwl $2, 3($1)
+; MIPS32-NEXT: lwr $2, 0($1)
+; MIPS32-NEXT: # implicit-def: $v1
+; MIPS32-NEXT: lwl $3, 7($1)
+; MIPS32-NEXT: lwr $3, 4($1)
+; MIPS32-NEXT: mtc1 $2, $f0
+; MIPS32-NEXT: mtc1 $3, $f1
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: load_double_align1:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lui $1, %hi(double_align1)
+; MIPS32R6-NEXT: addiu $1, $1, %lo(double_align1)
+; MIPS32R6-NEXT: ldc1 $f0, 0($1)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = load double, double* @double_align1, align 1
+ ret double %0
+}
+
+define double @load_double_align2() {
+; MIPS32-LABEL: load_double_align2:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lui $1, %hi(double_align2)
+; MIPS32-NEXT: addiu $1, $1, %lo(double_align2)
+; MIPS32-NEXT: # implicit-def: $v0
+; MIPS32-NEXT: lwl $2, 3($1)
+; MIPS32-NEXT: lwr $2, 0($1)
+; MIPS32-NEXT: # implicit-def: $v1
+; MIPS32-NEXT: lwl $3, 7($1)
+; MIPS32-NEXT: lwr $3, 4($1)
+; MIPS32-NEXT: mtc1 $2, $f0
+; MIPS32-NEXT: mtc1 $3, $f1
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: load_double_align2:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lui $1, %hi(double_align2)
+; MIPS32R6-NEXT: addiu $1, $1, %lo(double_align2)
+; MIPS32R6-NEXT: ldc1 $f0, 0($1)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = load double, double* @double_align2, align 2
+ ret double %0
+}
+
+define double @load_double_align4() {
+; MIPS32-LABEL: load_double_align4:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lui $1, %hi(double_align4)
+; MIPS32-NEXT: addiu $1, $1, %lo(double_align4)
+; MIPS32-NEXT: lw $2, 0($1)
+; MIPS32-NEXT: lw $1, 4($1)
+; MIPS32-NEXT: mtc1 $2, $f0
+; MIPS32-NEXT: mtc1 $1, $f1
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: load_double_align4:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lui $1, %hi(double_align4)
+; MIPS32R6-NEXT: addiu $1, $1, %lo(double_align4)
+; MIPS32R6-NEXT: ldc1 $f0, 0($1)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = load double, double* @double_align4, align 4
+ ret double %0
+}
+
+define double @load_double_align8() {
+; MIPS32-LABEL: load_double_align8:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lui $1, %hi(double_align8)
+; MIPS32-NEXT: addiu $1, $1, %lo(double_align8)
+; MIPS32-NEXT: ldc1 $f0, 0($1)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: load_double_align8:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lui $1, %hi(double_align8)
+; MIPS32R6-NEXT: addiu $1, $1, %lo(double_align8)
+; MIPS32R6-NEXT: ldc1 $f0, 0($1)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = load double, double* @double_align8, align 8
+ ret double %0
+}
+
+define i64 @load_i64_align1() {
+; MIPS32-LABEL: load_i64_align1:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lui $1, %hi(i64_align1)
+; MIPS32-NEXT: addiu $1, $1, %lo(i64_align1)
+; MIPS32-NEXT: # implicit-def: $v0
+; MIPS32-NEXT: lwl $2, 3($1)
+; MIPS32-NEXT: lwr $2, 0($1)
+; MIPS32-NEXT: # implicit-def: $v1
+; MIPS32-NEXT: lwl $3, 7($1)
+; MIPS32-NEXT: lwr $3, 4($1)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: load_i64_align1:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lui $1, %hi(i64_align1)
+; MIPS32R6-NEXT: addiu $1, $1, %lo(i64_align1)
+; MIPS32R6-NEXT: lw $2, 0($1)
+; MIPS32R6-NEXT: lw $3, 4($1)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = load i64, i64* @i64_align1, align 1
+ ret i64 %0
+}
+
+define i64 @load_i64_align2() {
+; MIPS32-LABEL: load_i64_align2:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lui $1, %hi(i64_align2)
+; MIPS32-NEXT: addiu $1, $1, %lo(i64_align2)
+; MIPS32-NEXT: # implicit-def: $v0
+; MIPS32-NEXT: lwl $2, 3($1)
+; MIPS32-NEXT: lwr $2, 0($1)
+; MIPS32-NEXT: # implicit-def: $v1
+; MIPS32-NEXT: lwl $3, 7($1)
+; MIPS32-NEXT: lwr $3, 4($1)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: load_i64_align2:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lui $1, %hi(i64_align2)
+; MIPS32R6-NEXT: addiu $1, $1, %lo(i64_align2)
+; MIPS32R6-NEXT: lw $2, 0($1)
+; MIPS32R6-NEXT: lw $3, 4($1)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = load i64, i64* @i64_align2, align 2
+ ret i64 %0
+}
+
+define i64 @load_i64_align4() {
+; MIPS32-LABEL: load_i64_align4:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lui $1, %hi(i64_align4)
+; MIPS32-NEXT: addiu $1, $1, %lo(i64_align4)
+; MIPS32-NEXT: lw $2, 0($1)
+; MIPS32-NEXT: lw $3, 4($1)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: load_i64_align4:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lui $1, %hi(i64_align4)
+; MIPS32R6-NEXT: addiu $1, $1, %lo(i64_align4)
+; MIPS32R6-NEXT: lw $2, 0($1)
+; MIPS32R6-NEXT: lw $3, 4($1)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = load i64, i64* @i64_align4, align 4
+ ret i64 %0
+}
+
+define i64 @load_i64_align8() {
+; MIPS32-LABEL: load_i64_align8:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lui $1, %hi(i64_align8)
+; MIPS32-NEXT: addiu $1, $1, %lo(i64_align8)
+; MIPS32-NEXT: lw $2, 0($1)
+; MIPS32-NEXT: lw $3, 4($1)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: load_i64_align8:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lui $1, %hi(i64_align8)
+; MIPS32R6-NEXT: addiu $1, $1, %lo(i64_align8)
+; MIPS32R6-NEXT: lw $2, 0($1)
+; MIPS32R6-NEXT: lw $3, 4($1)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = load i64, i64* @i64_align8, align 8
+ ret i64 %0
+}
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/store_split_because_of_memsize_or_align.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/store_split_because_of_memsize_or_align.ll
new file mode 100644
index 000000000000..7d068633a505
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/store_split_because_of_memsize_or_align.ll
@@ -0,0 +1,588 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
+; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r6 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32R6
+
+%struct.MemSize3_Align1 = type { [3 x i8], i8 }
+%struct.MemSize3_Align2 = type { [3 x i8], i8 }
+%struct.MemSize3_Align4 = type { [3 x i8], i8 }
+%struct.MemSize3_Align8 = type { [3 x i8], i8, [4 x i8] }
+%struct.MemSize5_Align1 = type <{ [5 x i8], i16, i8 }>
+%struct.MemSize5_Align2 = type <{ [5 x i8], i16, i8 }>
+%struct.MemSize5_Align4 = type <{ [5 x i8], i16, i8 }>
+%struct.MemSize5_Align8 = type <{ [5 x i8], i16, i8 }>
+%struct.MemSize6_Align1 = type { [6 x i8], i16 }
+%struct.MemSize6_Align2 = type { [6 x i8], i16 }
+%struct.MemSize6_Align4 = type { [6 x i8], i16 }
+%struct.MemSize6_Align8 = type { [6 x i8], i16 }
+%struct.MemSize7_Align1 = type { [7 x i8], i8 }
+%struct.MemSize7_Align2 = type { [7 x i8], i8 }
+%struct.MemSize7_Align4 = type { [7 x i8], i8 }
+%struct.MemSize7_Align8 = type { [7 x i8], i8 }
+
+@double_align1 = common global double 0.000000e+00, align 1
+@double_align2 = common global double 0.000000e+00, align 2
+@double_align4 = common global double 0.000000e+00, align 4
+@double_align8 = common global double 0.000000e+00, align 8
+@i64_align1 = common global i64 0, align 1
+@i64_align2 = common global i64 0, align 2
+@i64_align4 = common global i64 0, align 4
+@i64_align8 = common global i64 0, align 8
+
+define void @store3align1(%struct.MemSize3_Align1* %S, i32 signext %a) {
+; MIPS32-LABEL: store3align1:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: sb $5, 0($4)
+; MIPS32-NEXT: srl $1, $5, 8
+; MIPS32-NEXT: sb $1, 1($4)
+; MIPS32-NEXT: srl $1, $5, 16
+; MIPS32-NEXT: sb $1, 2($4)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: store3align1:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: sh $5, 0($4)
+; MIPS32R6-NEXT: srl $1, $5, 16
+; MIPS32R6-NEXT: sb $1, 2($4)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize3_Align1* %S to i24*
+ %1 = trunc i32 %a to i24
+ store i24 %1, i24* %0, align 1
+ ret void
+}
+
+define void @store3align2(%struct.MemSize3_Align2* %S, i32 signext %a) {
+; MIPS32-LABEL: store3align2:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: sh $5, 0($4)
+; MIPS32-NEXT: srl $1, $5, 16
+; MIPS32-NEXT: sb $1, 2($4)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: store3align2:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: sh $5, 0($4)
+; MIPS32R6-NEXT: srl $1, $5, 16
+; MIPS32R6-NEXT: sb $1, 2($4)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize3_Align2* %S to i24*
+ %1 = trunc i32 %a to i24
+ store i24 %1, i24* %0, align 2
+ ret void
+}
+
+define void @store3align4(%struct.MemSize3_Align4* %S, i32 signext %a) {
+; MIPS32-LABEL: store3align4:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: sh $5, 0($4)
+; MIPS32-NEXT: srl $1, $5, 16
+; MIPS32-NEXT: sb $1, 2($4)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: store3align4:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: sh $5, 0($4)
+; MIPS32R6-NEXT: srl $1, $5, 16
+; MIPS32R6-NEXT: sb $1, 2($4)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize3_Align4* %S to i24*
+ %1 = trunc i32 %a to i24
+ store i24 %1, i24* %0, align 4
+ ret void
+}
+
+define void @store3align8(%struct.MemSize3_Align8* %S, i32 signext %a) {
+; MIPS32-LABEL: store3align8:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: sh $5, 0($4)
+; MIPS32-NEXT: srl $1, $5, 16
+; MIPS32-NEXT: sb $1, 2($4)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: store3align8:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: sh $5, 0($4)
+; MIPS32R6-NEXT: srl $1, $5, 16
+; MIPS32R6-NEXT: sb $1, 2($4)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize3_Align8* %S to i24*
+ %1 = trunc i32 %a to i24
+ store i24 %1, i24* %0, align 8
+ ret void
+}
+
+define void @store5align1(%struct.MemSize5_Align1* %S, i64 %a) {
+; MIPS32-LABEL: store5align1:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: swl $6, 3($4)
+; MIPS32-NEXT: swr $6, 0($4)
+; MIPS32-NEXT: sb $7, 4($4)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: store5align1:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: sw $6, 0($4)
+; MIPS32R6-NEXT: sb $7, 4($4)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize5_Align1* %S to i40*
+ %1 = trunc i64 %a to i40
+ store i40 %1, i40* %0, align 1
+ ret void
+}
+
+define void @store5align2(%struct.MemSize5_Align2* %S, i64 %a) {
+; MIPS32-LABEL: store5align2:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: swl $6, 3($4)
+; MIPS32-NEXT: swr $6, 0($4)
+; MIPS32-NEXT: sb $7, 4($4)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: store5align2:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: sw $6, 0($4)
+; MIPS32R6-NEXT: sb $7, 4($4)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize5_Align2* %S to i40*
+ %1 = trunc i64 %a to i40
+ store i40 %1, i40* %0, align 2
+ ret void
+}
+
+define void @store5align4(%struct.MemSize5_Align4* %S, i64 %a) {
+; MIPS32-LABEL: store5align4:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: sw $6, 0($4)
+; MIPS32-NEXT: sb $7, 4($4)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: store5align4:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: sw $6, 0($4)
+; MIPS32R6-NEXT: sb $7, 4($4)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize5_Align4* %S to i40*
+ %1 = trunc i64 %a to i40
+ store i40 %1, i40* %0, align 4
+ ret void
+}
+
+define void @store5align8(%struct.MemSize5_Align8* %S, i64 %a) {
+; MIPS32-LABEL: store5align8:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: sw $6, 0($4)
+; MIPS32-NEXT: sb $7, 4($4)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: store5align8:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: sw $6, 0($4)
+; MIPS32R6-NEXT: sb $7, 4($4)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize5_Align8* %S to i40*
+ %1 = trunc i64 %a to i40
+ store i40 %1, i40* %0, align 8
+ ret void
+}
+
+define void @store6align1(%struct.MemSize6_Align1* %S, i64 %a) {
+; MIPS32-LABEL: store6align1:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: ori $1, $zero, 4
+; MIPS32-NEXT: addu $1, $4, $1
+; MIPS32-NEXT: swl $6, 3($4)
+; MIPS32-NEXT: swr $6, 0($4)
+; MIPS32-NEXT: sb $7, 4($4)
+; MIPS32-NEXT: srl $2, $7, 8
+; MIPS32-NEXT: sb $2, 1($1)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: store6align1:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: sw $6, 0($4)
+; MIPS32R6-NEXT: sh $7, 4($4)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize6_Align1* %S to i48*
+ %1 = trunc i64 %a to i48
+ store i48 %1, i48* %0, align 1
+ ret void
+}
+
+define void @store6align2(%struct.MemSize6_Align2* %S, i64 %a) {
+; MIPS32-LABEL: store6align2:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: swl $6, 3($4)
+; MIPS32-NEXT: swr $6, 0($4)
+; MIPS32-NEXT: sh $7, 4($4)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: store6align2:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: sw $6, 0($4)
+; MIPS32R6-NEXT: sh $7, 4($4)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize6_Align2* %S to i48*
+ %1 = trunc i64 %a to i48
+ store i48 %1, i48* %0, align 2
+ ret void
+}
+
+define void @store6align4(%struct.MemSize6_Align4* %S, i64 %a) {
+; MIPS32-LABEL: store6align4:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: sw $6, 0($4)
+; MIPS32-NEXT: sh $7, 4($4)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: store6align4:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: sw $6, 0($4)
+; MIPS32R6-NEXT: sh $7, 4($4)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize6_Align4* %S to i48*
+ %1 = trunc i64 %a to i48
+ store i48 %1, i48* %0, align 4
+ ret void
+}
+
+define void @store6align8(%struct.MemSize6_Align8* %S, i64 %a) {
+; MIPS32-LABEL: store6align8:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: sw $6, 0($4)
+; MIPS32-NEXT: sh $7, 4($4)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: store6align8:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: sw $6, 0($4)
+; MIPS32R6-NEXT: sh $7, 4($4)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize6_Align8* %S to i48*
+ %1 = trunc i64 %a to i48
+ store i48 %1, i48* %0, align 8
+ ret void
+}
+
+define void @store7align1(%struct.MemSize7_Align1* %S, i64 %a) {
+; MIPS32-LABEL: store7align1:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: ori $1, $zero, 4
+; MIPS32-NEXT: addu $1, $4, $1
+; MIPS32-NEXT: swl $6, 3($4)
+; MIPS32-NEXT: swr $6, 0($4)
+; MIPS32-NEXT: sb $7, 4($4)
+; MIPS32-NEXT: srl $2, $7, 8
+; MIPS32-NEXT: sb $2, 1($1)
+; MIPS32-NEXT: srl $2, $7, 16
+; MIPS32-NEXT: sb $2, 2($1)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: store7align1:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: ori $1, $zero, 4
+; MIPS32R6-NEXT: addu $1, $4, $1
+; MIPS32R6-NEXT: sw $6, 0($4)
+; MIPS32R6-NEXT: sh $7, 4($4)
+; MIPS32R6-NEXT: srl $2, $7, 16
+; MIPS32R6-NEXT: sb $2, 2($1)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize7_Align1* %S to i56*
+ %1 = trunc i64 %a to i56
+ store i56 %1, i56* %0, align 1
+ ret void
+}
+
+define void @store7align2(%struct.MemSize7_Align2* %S, i64 %a) {
+; MIPS32-LABEL: store7align2:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: ori $1, $zero, 4
+; MIPS32-NEXT: addu $1, $4, $1
+; MIPS32-NEXT: swl $6, 3($4)
+; MIPS32-NEXT: swr $6, 0($4)
+; MIPS32-NEXT: sh $7, 4($4)
+; MIPS32-NEXT: srl $2, $7, 16
+; MIPS32-NEXT: sb $2, 2($1)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: store7align2:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: ori $1, $zero, 4
+; MIPS32R6-NEXT: addu $1, $4, $1
+; MIPS32R6-NEXT: sw $6, 0($4)
+; MIPS32R6-NEXT: sh $7, 4($4)
+; MIPS32R6-NEXT: srl $2, $7, 16
+; MIPS32R6-NEXT: sb $2, 2($1)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize7_Align2* %S to i56*
+ %1 = trunc i64 %a to i56
+ store i56 %1, i56* %0, align 2
+ ret void
+}
+
+define void @store7align4(%struct.MemSize7_Align4* %S, i64 %a) {
+; MIPS32-LABEL: store7align4:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: ori $1, $zero, 4
+; MIPS32-NEXT: addu $1, $4, $1
+; MIPS32-NEXT: sw $6, 0($4)
+; MIPS32-NEXT: sh $7, 4($4)
+; MIPS32-NEXT: srl $2, $7, 16
+; MIPS32-NEXT: sb $2, 2($1)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: store7align4:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: ori $1, $zero, 4
+; MIPS32R6-NEXT: addu $1, $4, $1
+; MIPS32R6-NEXT: sw $6, 0($4)
+; MIPS32R6-NEXT: sh $7, 4($4)
+; MIPS32R6-NEXT: srl $2, $7, 16
+; MIPS32R6-NEXT: sb $2, 2($1)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+ %0 = bitcast %struct.MemSize7_Align4* %S to i56*
+ %1 = trunc i64 %a to i56
+ store i56 %1, i56* %0, align 4
+ ret void
+}
+
+; i56 store with 8-byte alignment: identical lowering to the align-4 case
+; (sw + sh + sb) on both subtargets — extra alignment beyond 4 does not
+; change how the non-power-of-2 store is split.
+define void @store7align8(%struct.MemSize7_Align8* %S, i64 %a) {
+; MIPS32-LABEL: store7align8:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: ori $1, $zero, 4
+; MIPS32-NEXT: addu $1, $4, $1
+; MIPS32-NEXT: sw $6, 0($4)
+; MIPS32-NEXT: sh $7, 4($4)
+; MIPS32-NEXT: srl $2, $7, 16
+; MIPS32-NEXT: sb $2, 2($1)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: store7align8:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: ori $1, $zero, 4
+; MIPS32R6-NEXT: addu $1, $4, $1
+; MIPS32R6-NEXT: sw $6, 0($4)
+; MIPS32R6-NEXT: sh $7, 4($4)
+; MIPS32R6-NEXT: srl $2, $7, 16
+; MIPS32R6-NEXT: sb $2, 2($1)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+  %0 = bitcast %struct.MemSize7_Align8* %S to i56*
+  %1 = trunc i64 %a to i56
+  store i56 %1, i56* %0, align 8
+  ret void
+}
+
+; Unaligned (align 1) double store. Pre-r6 moves the f64 halves to GPRs
+; with mfc1 and stores each word via an swl/swr pair; r6 emits a single
+; sdc1 because it handles unaligned FP stores directly.
+define void @store_double_align1(double %a) {
+; MIPS32-LABEL: store_double_align1:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lui $1, %hi(double_align1)
+; MIPS32-NEXT: addiu $1, $1, %lo(double_align1)
+; MIPS32-NEXT: mfc1 $2, $f12
+; MIPS32-NEXT: mfc1 $3, $f13
+; MIPS32-NEXT: swl $2, 3($1)
+; MIPS32-NEXT: swr $2, 0($1)
+; MIPS32-NEXT: swl $3, 7($1)
+; MIPS32-NEXT: swr $3, 4($1)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: store_double_align1:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lui $1, %hi(double_align1)
+; MIPS32R6-NEXT: addiu $1, $1, %lo(double_align1)
+; MIPS32R6-NEXT: sdc1 $f12, 0($1)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+  store double %a, double* @double_align1, align 1
+  ret void
+}
+
+; Double store with 2-byte alignment: same split as the align-1 case on
+; pre-r6 (mfc1 + swl/swr per word); r6 again uses a single sdc1.
+define void @store_double_align2(double %a) {
+; MIPS32-LABEL: store_double_align2:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lui $1, %hi(double_align2)
+; MIPS32-NEXT: addiu $1, $1, %lo(double_align2)
+; MIPS32-NEXT: mfc1 $2, $f12
+; MIPS32-NEXT: mfc1 $3, $f13
+; MIPS32-NEXT: swl $2, 3($1)
+; MIPS32-NEXT: swr $2, 0($1)
+; MIPS32-NEXT: swl $3, 7($1)
+; MIPS32-NEXT: swr $3, 4($1)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: store_double_align2:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lui $1, %hi(double_align2)
+; MIPS32R6-NEXT: addiu $1, $1, %lo(double_align2)
+; MIPS32R6-NEXT: sdc1 $f12, 0($1)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+  store double %a, double* @double_align2, align 2
+  ret void
+}
+
+; Double store with 4-byte alignment: pre-r6 can use two aligned sw after
+; moving the halves out with mfc1 (no swl/swr needed); r6 uses sdc1.
+define void @store_double_align4(double %a) {
+; MIPS32-LABEL: store_double_align4:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lui $1, %hi(double_align4)
+; MIPS32-NEXT: addiu $1, $1, %lo(double_align4)
+; MIPS32-NEXT: mfc1 $2, $f12
+; MIPS32-NEXT: mfc1 $3, $f13
+; MIPS32-NEXT: sw $2, 0($1)
+; MIPS32-NEXT: sw $3, 4($1)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: store_double_align4:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lui $1, %hi(double_align4)
+; MIPS32R6-NEXT: addiu $1, $1, %lo(double_align4)
+; MIPS32R6-NEXT: sdc1 $f12, 0($1)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+  store double %a, double* @double_align4, align 4
+  ret void
+}
+
+; Fully aligned (align 8) double store: both subtargets emit a single
+; sdc1 — the baseline case with no legalization split.
+define void @store_double_align8(double %a) {
+; MIPS32-LABEL: store_double_align8:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lui $1, %hi(double_align8)
+; MIPS32-NEXT: addiu $1, $1, %lo(double_align8)
+; MIPS32-NEXT: sdc1 $f12, 0($1)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: store_double_align8:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lui $1, %hi(double_align8)
+; MIPS32R6-NEXT: addiu $1, $1, %lo(double_align8)
+; MIPS32R6-NEXT: sdc1 $f12, 0($1)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+  store double %a, double* @double_align8, align 8
+  ret void
+}
+
+; Unaligned (align 1) i64 store: the value arrives split across $4/$5.
+; Pre-r6 stores each 32-bit half with an swl/swr pair; r6 uses two plain
+; sw instructions since unaligned word stores are supported.
+define void @store_i64_align1(i64 %a) {
+; MIPS32-LABEL: store_i64_align1:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lui $1, %hi(i64_align1)
+; MIPS32-NEXT: addiu $1, $1, %lo(i64_align1)
+; MIPS32-NEXT: swl $4, 3($1)
+; MIPS32-NEXT: swr $4, 0($1)
+; MIPS32-NEXT: swl $5, 7($1)
+; MIPS32-NEXT: swr $5, 4($1)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: store_i64_align1:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lui $1, %hi(i64_align1)
+; MIPS32R6-NEXT: addiu $1, $1, %lo(i64_align1)
+; MIPS32R6-NEXT: sw $4, 0($1)
+; MIPS32R6-NEXT: sw $5, 4($1)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+  store i64 %a, i64* @i64_align1, align 1
+  ret void
+}
+
+; i64 store with 2-byte alignment: same lowering as align 1 — swl/swr
+; pairs per half on pre-r6, two sw on r6.
+define void @store_i64_align2(i64 signext %a) {
+; MIPS32-LABEL: store_i64_align2:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lui $1, %hi(i64_align2)
+; MIPS32-NEXT: addiu $1, $1, %lo(i64_align2)
+; MIPS32-NEXT: swl $4, 3($1)
+; MIPS32-NEXT: swr $4, 0($1)
+; MIPS32-NEXT: swl $5, 7($1)
+; MIPS32-NEXT: swr $5, 4($1)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: store_i64_align2:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lui $1, %hi(i64_align2)
+; MIPS32R6-NEXT: addiu $1, $1, %lo(i64_align2)
+; MIPS32R6-NEXT: sw $4, 0($1)
+; MIPS32R6-NEXT: sw $5, 4($1)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+  store i64 %a, i64* @i64_align2, align 2
+  ret void
+}
+
+; i64 store with 4-byte alignment: word-aligned, so both subtargets emit
+; two plain sw instructions — no unaligned-access expansion required.
+define void @store_i64_align4(i64 %a) {
+; MIPS32-LABEL: store_i64_align4:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lui $1, %hi(i64_align4)
+; MIPS32-NEXT: addiu $1, $1, %lo(i64_align4)
+; MIPS32-NEXT: sw $4, 0($1)
+; MIPS32-NEXT: sw $5, 4($1)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: store_i64_align4:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lui $1, %hi(i64_align4)
+; MIPS32R6-NEXT: addiu $1, $1, %lo(i64_align4)
+; MIPS32R6-NEXT: sw $4, 0($1)
+; MIPS32R6-NEXT: sw $5, 4($1)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+  store i64 %a, i64* @i64_align4, align 4
+  ret void
+}
+
+; Fully aligned (align 8) i64 store: baseline case, two sw instructions
+; on both subtargets (i64 is split into two 32-bit GPR halves).
+define void @store_i64_align8(i64 signext %a) {
+; MIPS32-LABEL: store_i64_align8:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lui $1, %hi(i64_align8)
+; MIPS32-NEXT: addiu $1, $1, %lo(i64_align8)
+; MIPS32-NEXT: sw $4, 0($1)
+; MIPS32-NEXT: sw $5, 4($1)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+;
+; MIPS32R6-LABEL: store_i64_align8:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: lui $1, %hi(i64_align8)
+; MIPS32R6-NEXT: addiu $1, $1, %lo(i64_align8)
+; MIPS32R6-NEXT: sw $4, 0($1)
+; MIPS32R6-NEXT: sw $5, 4($1)
+; MIPS32R6-NEXT: jrc $ra
+entry:
+  store i64 %a, i64* @i64_align8, align 8
+  ret void
+}
More information about the llvm-commits
mailing list