[llvm] 73c7c56 - [LLVM][DAGCombiner] Look through freeze when combining extensions of loads (#175022)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Jan 29 04:01:48 PST 2026
Author: David Sherwood
Date: 2026-01-29T12:01:43Z
New Revision: 73c7c562dd9e1ead9641aaee5f67d972e1b40c84
URL: https://github.com/llvm/llvm-project/commit/73c7c562dd9e1ead9641aaee5f67d972e1b40c84
DIFF: https://github.com/llvm/llvm-project/commit/73c7c562dd9e1ead9641aaee5f67d972e1b40c84.diff
LOG: [LLVM][DAGCombiner] Look through freeze when combining extensions of loads (#175022)
Following on from https://github.com/llvm/llvm-project/pull/172484, I
have added support to tryToFoldExtOfLoad for looking through freezes,
in order to catch more cases of extending loads. This kind of code is
sometimes generated by the loop vectoriser. For now I've limited the
fold to cases where the load is only used by the freeze, since
otherwise it leads to worse code in some X86 tests.
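To illustrate the pattern (this mirrors the new AArch64 tests below,
rather than being a literal transcript of the combine), a freeze
sitting between a load and its extend:

  %load = load <vscale x 2 x i8>, ptr %src
  %load.frozen = freeze <vscale x 2 x i8> %load
  %ext = sext <vscale x 2 x i8> %load.frozen to <vscale x 2 x i64>

can now be selected as a single sign-extending load. On the DAG side
the fold rebuilds the result as a freeze of the new extending load
wrapped in AssertSext/AssertZext, so the extension information is not
lost; see the DAGCombiner change below.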
Added:
llvm/test/CodeGen/AArch64/sve-ldst-sext.ll
llvm/test/CodeGen/AArch64/sve-ldst-zext.ll
Modified:
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
llvm/test/CodeGen/X86/2007-10-29-ExtendSetCC.ll
llvm/test/CodeGen/X86/avx512-ext.ll
llvm/test/CodeGen/X86/iabs.ll
llvm/test/CodeGen/X86/icmp-abs-C.ll
llvm/test/CodeGen/X86/icmp-pow2-logic-npow2.ll
llvm/test/CodeGen/X86/known-bits.ll
llvm/test/CodeGen/X86/neg-abs.ll
llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll
llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/basic.ll.expected
Removed:
################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 6ea4864ca98a8..93cc67ae00f4b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -14626,14 +14626,19 @@ static SDValue tryToFoldExtOfLoad(SelectionDAG &DAG, DAGCombiner &Combiner,
ISD::LoadExtType ExtLoadType,
ISD::NodeType ExtOpc,
bool NonNegZExt = false) {
- if (!ISD::isNON_EXTLoad(N0.getNode()) || !ISD::isUNINDEXEDLoad(N0.getNode()))
+ bool Frozen = N0.getOpcode() == ISD::FREEZE;
+ SDValue Freeze = Frozen ? N0 : SDValue();
+ auto *Load = dyn_cast<LoadSDNode>(Frozen ? N0.getOperand(0) : N0);
+ // TODO: Support multiple uses of the load when frozen.
+ if (!Load || !ISD::isNON_EXTLoad(Load) || !ISD::isUNINDEXEDLoad(Load) ||
+ (Frozen && !Load->hasNUsesOfValue(1, 0)))
return {};
// If this is zext nneg, see if it would make sense to treat it as a sext.
if (NonNegZExt) {
assert(ExtLoadType == ISD::ZEXTLOAD && ExtOpc == ISD::ZERO_EXTEND &&
"Unexpected load type or opcode");
- for (SDNode *User : N0->users()) {
+ for (SDNode *User : Load->users()) {
if (User->getOpcode() == ISD::SETCC) {
ISD::CondCode CC = cast<CondCodeSDNode>(User->getOperand(2))->get();
if (ISD::isSignedIntSetCC(CC)) {
@@ -14648,35 +14653,47 @@ static SDValue tryToFoldExtOfLoad(SelectionDAG &DAG, DAGCombiner &Combiner,
// TODO: isFixedLengthVector() should be removed and any negative effects on
// code generation being the result of that target's implementation of
// isVectorLoadExtDesirable().
- if ((LegalOperations || VT.isFixedLengthVector() ||
- !cast<LoadSDNode>(N0)->isSimple()) &&
- !TLI.isLoadExtLegal(ExtLoadType, VT, N0.getValueType()))
+ if ((LegalOperations || VT.isFixedLengthVector() || !Load->isSimple()) &&
+ !TLI.isLoadExtLegal(ExtLoadType, VT, Load->getValueType(0)))
return {};
bool DoXform = true;
SmallVector<SDNode *, 4> SetCCs;
- if (!N0.hasOneUse())
- DoXform = ExtendUsesToFormExtLoad(VT, N, N0, ExtOpc, SetCCs, TLI);
+ if (!N0->hasOneUse())
+ DoXform = ExtendUsesToFormExtLoad(VT, N, Frozen ? Freeze : SDValue(Load, 0),
+ ExtOpc, SetCCs, TLI);
if (VT.isVector())
DoXform &= TLI.isVectorLoadExtDesirable(SDValue(N, 0));
if (!DoXform)
return {};
- LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- SDValue ExtLoad = DAG.getExtLoad(ExtLoadType, SDLoc(LN0), VT, LN0->getChain(),
- LN0->getBasePtr(), N0.getValueType(),
- LN0->getMemOperand());
- Combiner.ExtendSetCCUses(SetCCs, N0, ExtLoad, ExtOpc);
+ SDLoc DL(Load);
// If the load value is used only by N, replace it via CombineTo N.
- bool NoReplaceTrunc = SDValue(LN0, 0).hasOneUse();
- Combiner.CombineTo(N, ExtLoad);
+ bool NoReplaceTrunc = N0.hasOneUse();
+ SDValue ExtLoad =
+ DAG.getExtLoad(ExtLoadType, DL, VT, Load->getChain(), Load->getBasePtr(),
+ Load->getValueType(0), Load->getMemOperand());
+ SDValue Res = ExtLoad;
+ if (Frozen) {
+ Res = DAG.getFreeze(ExtLoad);
+ Res = DAG.getNode(ExtLoadType == ISD::SEXTLOAD ? ISD::AssertSext
+ : ISD::AssertZext,
+ DL, Res.getValueType(), Res,
+ DAG.getValueType(Load->getValueType(0).getScalarType()));
+ }
+ Combiner.ExtendSetCCUses(SetCCs, N0, Res, ExtOpc);
+ Combiner.CombineTo(N, Res);
if (NoReplaceTrunc) {
- DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), ExtLoad.getValue(1));
- Combiner.recursivelyDeleteUnusedNodes(LN0);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), ExtLoad.getValue(1));
+ Combiner.recursivelyDeleteUnusedNodes(N0.getNode());
} else {
- SDValue Trunc =
- DAG.getNode(ISD::TRUNCATE, SDLoc(N0), N0.getValueType(), ExtLoad);
- Combiner.CombineTo(LN0, Trunc, ExtLoad.getValue(1));
+ SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, Load->getValueType(0), Res);
+ if (Frozen) {
+ Combiner.CombineTo(Freeze.getNode(), Trunc);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), ExtLoad.getValue(1));
+ } else {
+ Combiner.CombineTo(Load, Trunc, ExtLoad.getValue(1));
+ }
}
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index 681ceb22c0ad3..f10b6dfa902ec 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -1190,7 +1190,8 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
// Generic Result Splitting.
void SplitRes_MERGE_VALUES(SDNode *N, unsigned ResNo,
SDValue &Lo, SDValue &Hi);
- void SplitVecRes_AssertZext (SDNode *N, SDValue &Lo, SDValue &Hi);
+ void SplitVecRes_AssertZext(SDNode *N, SDValue &Lo, SDValue &Hi);
+ void SplitVecRes_AssertSext(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitRes_ARITH_FENCE (SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitRes_Select (SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitRes_SELECT_CC (SDNode *N, SDValue &Lo, SDValue &Hi);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
index 6ded0bf0a92c0..26036207c4b82 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
@@ -599,6 +599,16 @@ void DAGTypeLegalizer::SplitVecRes_AssertZext(SDNode *N, SDValue &Lo,
Hi = DAG.getNode(ISD::AssertZext, dl, H.getValueType(), H, N->getOperand(1));
}
+void DAGTypeLegalizer::SplitVecRes_AssertSext(SDNode *N, SDValue &Lo,
+ SDValue &Hi) {
+ SDValue L, H;
+ SDLoc dl(N);
+ GetSplitOp(N->getOperand(0), L, H);
+
+ Lo = DAG.getNode(ISD::AssertSext, dl, L.getValueType(), L, N->getOperand(1));
+ Hi = DAG.getNode(ISD::AssertSext, dl, H.getValueType(), H, N->getOperand(1));
+}
+
void DAGTypeLegalizer::SplitRes_FREEZE(SDNode *N, SDValue &Lo, SDValue &Hi) {
SDValue L, H;
SDLoc dl(N);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 75a0cc8f03739..4db9c5d009ae8 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1217,6 +1217,7 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
break;
case ISD::MERGE_VALUES: SplitRes_MERGE_VALUES(N, ResNo, Lo, Hi); break;
case ISD::AssertZext: SplitVecRes_AssertZext(N, Lo, Hi); break;
+ case ISD::AssertSext: SplitVecRes_AssertSext(N, Lo, Hi); break;
case ISD::VSELECT:
case ISD::SELECT:
case ISD::VP_MERGE:
diff --git a/llvm/test/CodeGen/AArch64/sve-ldst-sext.ll b/llvm/test/CodeGen/AArch64/sve-ldst-sext.ll
new file mode 100644
index 0000000000000..71d5bad97f2d7
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-ldst-sext.ll
@@ -0,0 +1,435 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
+
+define <vscale x 2 x i64> @sload_nxv2i8(ptr %src) {
+; CHECK-LABEL: sload_nxv2i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %load = load <vscale x 2 x i8>, ptr %src, align 1
+ %ext = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %ext
+}
+
+define <vscale x 2 x i64> @sload_nxv2i16(ptr %src) {
+; CHECK-LABEL: sload_nxv2i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %load = load <vscale x 2 x i16>, ptr %src, align 2
+ %ext = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %ext
+}
+
+define <vscale x 2 x i64> @sload_nxv2i32(ptr %src) {
+; CHECK-LABEL: sload_nxv2i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %load = load <vscale x 2 x i32>, ptr %src, align 4
+ %ext = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %ext
+}
+
+define <vscale x 4 x i32> @sload_nxv4i8(ptr %src) {
+; CHECK-LABEL: sload_nxv4i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %load = load <vscale x 4 x i8>, ptr %src, align 1
+ %ext = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %ext
+}
+
+define <vscale x 4 x i32> @sload_nxv4i16(ptr %src) {
+; CHECK-LABEL: sload_nxv4i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %load = load <vscale x 4 x i16>, ptr %src, align 2
+ %ext = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %ext
+}
+
+define <vscale x 8 x i16> @sload_nxv8i8(ptr %src) {
+; CHECK-LABEL: sload_nxv8i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %load = load <vscale x 8 x i8>, ptr %src, align 1
+ %ext = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
+ ret <vscale x 8 x i16> %ext
+}
+
+; Return type requires splitting
+define <vscale x 8 x i64> @sload_nxv8i16(ptr %a) {
+; CHECK-LABEL: sload_nxv8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1sh { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sh { z2.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1sh { z3.d }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT: ret
+ %load = load <vscale x 8 x i16>, ptr %a, align 2
+ %ext = sext <vscale x 8 x i16> %load to <vscale x 8 x i64>
+ ret <vscale x 8 x i64> %ext
+}
+
+define <vscale x 8 x i32> @sload_nxv8i8_nxv8i32(ptr %a) {
+; CHECK-LABEL: sload_nxv8i8_nxv8i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ld1sb { z1.s }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+ %load = load <vscale x 8 x i8>, ptr %a, align 1
+ %ext = sext <vscale x 8 x i8> %load to <vscale x 8 x i32>
+ ret <vscale x 8 x i32> %ext
+}
+
+; load requires promotion
+define <vscale x 2 x double> @sload_2i16_2f64(ptr noalias %in) {
+; CHECK-LABEL: sload_2i16_2f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ucvtf z0.d, p0/m, z0.s
+; CHECK-NEXT: ret
+ %load = load <vscale x 2 x i16>, ptr %in, align 2
+ %sext = sext <vscale x 2 x i16> %load to <vscale x 2 x i32>
+ %res = uitofp <vscale x 2 x i32> %sext to <vscale x 2 x double>
+ ret <vscale x 2 x double> %res
+}
+
+; Extending loads from unpacked to wide illegal types
+
+define <vscale x 4 x i64> @sload_4i8_4i64(ptr %a) {
+; CHECK-LABEL: sload_4i8_4i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1sb { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+ %aval = load <vscale x 4 x i8>, ptr %a, align 1
+ %aext = sext <vscale x 4 x i8> %aval to <vscale x 4 x i64>
+ ret <vscale x 4 x i64> %aext
+}
+
+define <vscale x 4 x i64> @sload_4i16_4i64(ptr %a) {
+; CHECK-LABEL: sload_4i16_4i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1sh { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+ %aval = load <vscale x 4 x i16>, ptr %a, align 2
+ %aext = sext <vscale x 4 x i16> %aval to <vscale x 4 x i64>
+ ret <vscale x 4 x i64> %aext
+}
+
+define <vscale x 8 x i32> @sload_8i8_8i32(ptr %a) {
+; CHECK-LABEL: sload_8i8_8i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ld1sb { z1.s }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+ %aval = load <vscale x 8 x i8>, ptr %a, align 1
+ %aext = sext <vscale x 8 x i8> %aval to <vscale x 8 x i32>
+ ret <vscale x 8 x i32> %aext
+}
+
+define <vscale x 8 x i64> @sload_8i8_8i64(ptr %a) {
+; CHECK-LABEL: sload_8i8_8i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1sb { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sb { z2.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1sb { z3.d }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT: ret
+ %aval = load <vscale x 8 x i8>, ptr %a, align 1
+ %aext = sext <vscale x 8 x i8> %aval to <vscale x 8 x i64>
+ ret <vscale x 8 x i64> %aext
+}
+
+define <vscale x 4 x i64> @sload_x2_4i8_4i64(ptr %a, ptr %b) {
+; CHECK-LABEL: sload_x2_4i8_4i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1sb { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1sb { z2.d }, p0/z, [x1, #1, mul vl]
+; CHECK-NEXT: ld1sb { z3.d }, p0/z, [x1]
+; CHECK-NEXT: add z1.d, z1.d, z2.d
+; CHECK-NEXT: add z0.d, z0.d, z3.d
+; CHECK-NEXT: ret
+ %aval = load <vscale x 4 x i8>, ptr %a, align 1
+ %bval = load <vscale x 4 x i8>, ptr %b, align 1
+ %aext = sext <vscale x 4 x i8> %aval to <vscale x 4 x i64>
+ %bext = sext <vscale x 4 x i8> %bval to <vscale x 4 x i64>
+ %res = add <vscale x 4 x i64> %aext, %bext
+ ret <vscale x 4 x i64> %res
+}
+
+define <vscale x 4 x i64> @sload_x2_4i16_4i64(ptr %a, ptr %b) {
+; CHECK-LABEL: sload_x2_4i16_4i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1sh { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1sh { z2.d }, p0/z, [x1, #1, mul vl]
+; CHECK-NEXT: ld1sh { z3.d }, p0/z, [x1]
+; CHECK-NEXT: add z1.d, z1.d, z2.d
+; CHECK-NEXT: add z0.d, z0.d, z3.d
+; CHECK-NEXT: ret
+ %aval = load <vscale x 4 x i16>, ptr %a, align 2
+ %bval = load <vscale x 4 x i16>, ptr %b, align 2
+ %aext = sext <vscale x 4 x i16> %aval to <vscale x 4 x i64>
+ %bext = sext <vscale x 4 x i16> %bval to <vscale x 4 x i64>
+ %res = add <vscale x 4 x i64> %aext, %bext
+ ret <vscale x 4 x i64> %res
+}
+
+define <vscale x 8 x i32> @sload_x2_8i8_8i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sload_x2_8i8_8i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1sb { z1.s }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ld1sb { z2.s }, p0/z, [x1, #1, mul vl]
+; CHECK-NEXT: ld1sb { z3.s }, p0/z, [x1]
+; CHECK-NEXT: add z1.s, z1.s, z2.s
+; CHECK-NEXT: add z0.s, z0.s, z3.s
+; CHECK-NEXT: ret
+ %aval = load <vscale x 8 x i8>, ptr %a, align 1
+ %bval = load <vscale x 8 x i8>, ptr %b, align 1
+ %aext = sext <vscale x 8 x i8> %aval to <vscale x 8 x i32>
+ %bext = sext <vscale x 8 x i8> %bval to <vscale x 8 x i32>
+ %res = add <vscale x 8 x i32> %aext, %bext
+ ret <vscale x 8 x i32> %res
+}
+
+define <vscale x 8 x i64> @sload_x2_8i8_8i64(ptr %a, ptr %b) {
+; CHECK-LABEL: sload_x2_8i8_8i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1sb { z3.d }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT: ld1sb { z2.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1sb { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1sb { z4.d }, p0/z, [x1]
+; CHECK-NEXT: ld1sb { z5.d }, p0/z, [x1, #3, mul vl]
+; CHECK-NEXT: ld1sb { z6.d }, p0/z, [x1, #2, mul vl]
+; CHECK-NEXT: ld1sb { z7.d }, p0/z, [x1, #1, mul vl]
+; CHECK-NEXT: add z0.d, z0.d, z4.d
+; CHECK-NEXT: add z3.d, z3.d, z5.d
+; CHECK-NEXT: add z1.d, z1.d, z7.d
+; CHECK-NEXT: add z2.d, z2.d, z6.d
+; CHECK-NEXT: ret
+ %aval = load <vscale x 8 x i8>, ptr %a, align 1
+ %bval = load <vscale x 8 x i8>, ptr %b, align 1
+ %aext = sext <vscale x 8 x i8> %aval to <vscale x 8 x i64>
+ %bext = sext <vscale x 8 x i8> %bval to <vscale x 8 x i64>
+ %res = add <vscale x 8 x i64> %aext, %bext
+ ret <vscale x 8 x i64> %res
+}
+
+define <vscale x 2 x i64> @load_frozen_before_sext(ptr %src) {
+; CHECK-LABEL: load_frozen_before_sext:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %load = load <vscale x 2 x i8>, ptr %src
+ %load.frozen = freeze <vscale x 2 x i8> %load
+ %ext = sext <vscale x 2 x i8> %load.frozen to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %ext
+}
+
+define <vscale x 8 x i32> @load_frozen_before_sext_needs_splitting(ptr %src) {
+; CHECK-LABEL: load_frozen_before_sext_needs_splitting:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ld1sb { z1.s }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+ %load = load <vscale x 8 x i8>, ptr %src
+ %load.frozen = freeze <vscale x 8 x i8> %load
+ %ext = sext <vscale x 8 x i8> %load.frozen to <vscale x 8 x i32>
+ ret <vscale x 8 x i32> %ext
+}
+
+; A multi-use freeze in this example effectively means the load is also multi-use.
+define <vscale x 2 x i64> @load_frozen_before_sext_multiuse(ptr %src) {
+; CHECK-LABEL: load_frozen_before_sext_multiuse:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1b { z1.d }, p0/z, [x0]
+; CHECK-NEXT: movprfx z0, z1
+; CHECK-NEXT: sxtb z0.d, p0/m, z1.d
+; CHECK-NEXT: // fake_use: $z1
+; CHECK-NEXT: ret
+ %load = load <vscale x 2 x i8>, ptr %src, align 1
+ %load.frozen = freeze <vscale x 2 x i8> %load
+ %ext = sext <vscale x 2 x i8> %load.frozen to <vscale x 2 x i64>
+ call void (...) @llvm.fake.use(<vscale x 2 x i8> %load.frozen)
+ ret <vscale x 2 x i64> %ext
+}
+
+; In this example the freeze is used twice and the load used once.
+define <vscale x 2 x i64> @load_frozen_before_sext_multiuse2(ptr %src) {
+; CHECK-LABEL: load_frozen_before_sext_multiuse2:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
+; CHECK-NEXT: movprfx z1, z0
+; CHECK-NEXT: sxtb z1.d, p0/m, z0.d
+; CHECK-NEXT: mul z0.d, z0.d, #13
+; CHECK-NEXT: and z0.d, z0.d, #0xff
+; CHECK-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEXT: ret
+ %load = load <vscale x 2 x i8>, ptr %src, align 1
+ %load.frozen = freeze <vscale x 2 x i8> %load
+ %ext = sext <vscale x 2 x i8> %load.frozen to <vscale x 2 x i64>
+ %mul = mul <vscale x 2 x i8> %load.frozen, splat (i8 13)
+ %mul.ext = zext <vscale x 2 x i8> %mul to <vscale x 2 x i64>
+ %res = add <vscale x 2 x i64> %ext, %mul.ext
+ ret <vscale x 2 x i64> %res
+}
+
+; In this example the freeze is used 3 times and the load used twice via the chain.
+define <vscale x 2 x i64> @load_frozen_before_sext_multiuse3(ptr %src, ptr %dst1, ptr %dst2) {
+; CHECK-LABEL: load_frozen_before_sext_multiuse3:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1b { z1.d }, p0/z, [x0]
+; CHECK-NEXT: movprfx z0, z1
+; CHECK-NEXT: sxtb z0.d, p0/m, z1.d
+; CHECK-NEXT: and z1.d, z1.d, #0xff
+; CHECK-NEXT: mov z2.d, z1.d
+; CHECK-NEXT: mul z1.d, z1.d, #49
+; CHECK-NEXT: mul z2.d, z2.d, #33
+; CHECK-NEXT: add z0.d, z0.d, z1.d
+; CHECK-NEXT: st1w { z2.d }, p0, [x1]
+; CHECK-NEXT: st1h { z1.d }, p0, [x2]
+; CHECK-NEXT: ret
+ %load = load <vscale x 2 x i8>, ptr %src, align 1
+ %load.frozen = freeze <vscale x 2 x i8> %load
+ %sext64 = sext <vscale x 2 x i8> %load.frozen to <vscale x 2 x i64>
+ %zext32 = zext <vscale x 2 x i8> %load.frozen to <vscale x 2 x i32>
+ %zext16 = zext <vscale x 2 x i8> %load.frozen to <vscale x 2 x i16>
+ %mul32 = mul <vscale x 2 x i32> %zext32, splat (i32 33)
+ %mul16 = mul <vscale x 2 x i16> %zext16, splat (i16 49)
+ store <vscale x 2 x i32> %mul32, ptr %dst1, align 4
+ store <vscale x 2 x i16> %mul16, ptr %dst2, align 2
+ %mul32.ext64 = sext <vscale x 2 x i16> %mul16 to <vscale x 2 x i64>
+ %res = add <vscale x 2 x i64> %sext64, %mul32.ext64
+ ret <vscale x 2 x i64> %res
+}
+
+; In this example the freeze is used twice - once for the sign-extend and
+; once for the icmp (or SETCC)
+define <vscale x 2 x i64> @load_frozen_before_sext_multiuse4(ptr %src1, ptr %src2, ptr %dst1) {
+; CHECK-LABEL: load_frozen_before_sext_multiuse4:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov w8, #1234 // =0x4d2
+; CHECK-NEXT: mov z3.d, #27 // =0x1b
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1b { z2.d }, p0/z, [x1]
+; CHECK-NEXT: movprfx z1, z0
+; CHECK-NEXT: sxtb z1.d, p0/m, z0.d
+; CHECK-NEXT: and z0.d, z0.d, #0xff
+; CHECK-NEXT: cmpeq p1.d, p0/z, z0.d, #3
+; CHECK-NEXT: mov z0.d, x8
+; CHECK-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEXT: sel z1.d, p1, z2.d, z3.d
+; CHECK-NEXT: st1b { z1.d }, p0, [x2]
+; CHECK-NEXT: ret
+ %load = load <vscale x 2 x i8>, ptr %src1, align 1
+ %load.frozen = freeze <vscale x 2 x i8> %load
+ %sext64 = sext <vscale x 2 x i8> %load.frozen to <vscale x 2 x i64>
+ %cmp = icmp eq <vscale x 2 x i8> %load.frozen, splat (i8 3)
+ %load2 = load <vscale x 2 x i8>, ptr %src2, align 1
+ %sel = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %load2, <vscale x 2 x i8> splat (i8 27)
+ store <vscale x 2 x i8> %sel, ptr %dst1, align 1
+ %res = add <vscale x 2 x i64> %sext64, splat (i64 1234)
+ ret <vscale x 2 x i64> %res
+}
+
+; In the following 3 variants there is one use of the freeze, and there are
+; multiple uses of the load via the chain. Each variant tests a different
+; case:
+; 1. Dest type is illegal, source is legal.
+; 2. Dest type is legal, source is illegal.
+; 3. Both dest and source types are illegal.
+
+
+define <vscale x 16 x i64> @load_frozen_before_zext_multiuse5_dst_illegal(ptr %src, ptr %dst) {
+; CHECK-LABEL: load_frozen_before_zext_multiuse5_dst_illegal:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov z24.d, #3 // =0x3
+; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1sb { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sb { z2.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1sb { z3.d }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT: ld1sb { z4.d }, p0/z, [x0, #4, mul vl]
+; CHECK-NEXT: ld1sb { z5.d }, p0/z, [x0, #5, mul vl]
+; CHECK-NEXT: ld1sb { z6.d }, p0/z, [x0, #6, mul vl]
+; CHECK-NEXT: ld1sb { z7.d }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: str z24, [x1, #6, mul vl]
+; CHECK-NEXT: str z24, [x1, #7, mul vl]
+; CHECK-NEXT: str z24, [x1, #4, mul vl]
+; CHECK-NEXT: str z24, [x1, #5, mul vl]
+; CHECK-NEXT: str z24, [x1, #2, mul vl]
+; CHECK-NEXT: str z24, [x1, #3, mul vl]
+; CHECK-NEXT: str z24, [x1]
+; CHECK-NEXT: str z24, [x1, #1, mul vl]
+; CHECK-NEXT: ret
+ %load = load <vscale x 16 x i8>, ptr %src, align 1
+ %load.frozen = freeze <vscale x 16 x i8> %load
+ %ext = sext <vscale x 16 x i8> %load.frozen to <vscale x 16 x i64>
+ store <vscale x 16 x i64> splat (i64 3), ptr %dst, align 8
+ ret <vscale x 16 x i64> %ext
+}
+
+define <vscale x 2 x i64> @load_frozen_before_sext_multiuse5_src_illegal(ptr %src, ptr %dst) {
+; CHECK-LABEL: load_frozen_before_sext_multiuse5_src_illegal:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov z1.d, #3 // =0x3
+; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0]
+; CHECK-NEXT: str z1, [x1]
+; CHECK-NEXT: ret
+ %load = load <vscale x 2 x i8>, ptr %src, align 1
+ %load.frozen = freeze <vscale x 2 x i8> %load
+ %ext = sext <vscale x 2 x i8> %load.frozen to <vscale x 2 x i64>
+ store <vscale x 2 x i64> splat (i64 3), ptr %dst, align 8
+ ret <vscale x 2 x i64> %ext
+}
+
+define <vscale x 4 x i64> @load_frozen_before_sext_multiuse5_both_illegal(ptr %src, ptr %dst) {
+; CHECK-LABEL: load_frozen_before_sext_multiuse5_both_illegal:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov z2.d, #3 // =0x3
+; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1sb { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: str z2, [x1, #1, mul vl]
+; CHECK-NEXT: str z2, [x1]
+; CHECK-NEXT: ret
+ %load = load <vscale x 4 x i8>, ptr %src, align 1
+ %load.frozen = freeze <vscale x 4 x i8> %load
+ %ext = sext <vscale x 4 x i8> %load.frozen to <vscale x 4 x i64>
+ store <vscale x 4 x i64> splat (i64 3), ptr %dst, align 8
+ ret <vscale x 4 x i64> %ext
+}
diff --git a/llvm/test/CodeGen/AArch64/sve-ldst-zext.ll b/llvm/test/CodeGen/AArch64/sve-ldst-zext.ll
new file mode 100644
index 0000000000000..93d9f974fdbd7
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-ldst-zext.ll
@@ -0,0 +1,426 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
+
+define <vscale x 2 x i64> @zload_nxv2i8(ptr %src) {
+; CHECK-LABEL: zload_nxv2i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %load = load <vscale x 2 x i8>, ptr %src, align 1
+ %ext = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %ext
+}
+
+define <vscale x 2 x i64> @zload_nxv2i16(ptr %src) {
+; CHECK-LABEL: zload_nxv2i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %load = load <vscale x 2 x i16>, ptr %src, align 2
+ %ext = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %ext
+}
+
+define <vscale x 2 x i64> @zload_nxv2i32(ptr %src) {
+; CHECK-LABEL: zload_nxv2i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %load = load <vscale x 2 x i32>, ptr %src, align 4
+ %ext = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %ext
+}
+
+define <vscale x 4 x i32> @zload_nxv4i8(ptr %src) {
+; CHECK-LABEL: zload_nxv4i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %load = load <vscale x 4 x i8>, ptr %src, align 1
+ %ext = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %ext
+}
+
+define <vscale x 4 x i32> @zload_nxv4i16(ptr %src) {
+; CHECK-LABEL: zload_nxv4i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %load = load <vscale x 4 x i16>, ptr %src, align 2
+ %ext = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %ext
+}
+
+define <vscale x 8 x i16> @zload_nxv8i8(ptr %src) {
+; CHECK-LABEL: zload_nxv8i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %load = load <vscale x 8 x i8>, ptr %src, align 1
+ %ext = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
+ ret <vscale x 8 x i16> %ext
+}
+
+; Return type requires splitting
+define <vscale x 8 x i64> @zload_nxv8i16(ptr %a) {
+; CHECK-LABEL: zload_nxv8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1h { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z2.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1h { z3.d }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT: ret
+ %load = load <vscale x 8 x i16>, ptr %a, align 2
+ %ext = zext <vscale x 8 x i16> %load to <vscale x 8 x i64>
+ ret <vscale x 8 x i64> %ext
+}
+
+define <vscale x 8 x i32> @zload_nxv8i8_nxv8i32(ptr %a) {
+; CHECK-LABEL: zload_nxv8i8_nxv8i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ld1b { z1.s }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+ %load = load <vscale x 8 x i8>, ptr %a, align 1
+ %ext = zext <vscale x 8 x i8> %load to <vscale x 8 x i32>
+ ret <vscale x 8 x i32> %ext
+}
+
+; load requires promotion
+define <vscale x 2 x double> @zload_2i16_2f64(ptr noalias %in) {
+; CHECK-LABEL: zload_2i16_2f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ucvtf z0.d, p0/m, z0.d
+; CHECK-NEXT: ret
+ %load = load <vscale x 2 x i16>, ptr %in, align 2
+ %zext = zext <vscale x 2 x i16> %load to <vscale x 2 x i32>
+ %res = uitofp <vscale x 2 x i32> %zext to <vscale x 2 x double>
+ ret <vscale x 2 x double> %res
+}
+
+; Extending loads from unpacked to wide illegal types
+
+define <vscale x 4 x i64> @zload_4i8_4i64(ptr %a) {
+; CHECK-LABEL: zload_4i8_4i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1b { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+ %aval = load <vscale x 4 x i8>, ptr %a, align 1
+ %aext = zext <vscale x 4 x i8> %aval to <vscale x 4 x i64>
+ ret <vscale x 4 x i64> %aext
+}
+
+define <vscale x 4 x i64> @zload_4i16_4i64(ptr %a) {
+; CHECK-LABEL: zload_4i16_4i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1h { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+ %aval = load <vscale x 4 x i16>, ptr %a, align 2
+ %aext = zext <vscale x 4 x i16> %aval to <vscale x 4 x i64>
+ ret <vscale x 4 x i64> %aext
+}
+
+define <vscale x 8 x i32> @zload_8i8_8i32(ptr %a) {
+; CHECK-LABEL: zload_8i8_8i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ld1b { z1.s }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+ %aval = load <vscale x 8 x i8>, ptr %a, align 1
+ %aext = zext <vscale x 8 x i8> %aval to <vscale x 8 x i32>
+ ret <vscale x 8 x i32> %aext
+}
+
+define <vscale x 8 x i64> @zload_8i8_8i64(ptr %a) {
+; CHECK-LABEL: zload_8i8_8i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1b { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z2.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1b { z3.d }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT: ret
+ %aval = load <vscale x 8 x i8>, ptr %a, align 1
+ %aext = zext <vscale x 8 x i8> %aval to <vscale x 8 x i64>
+ ret <vscale x 8 x i64> %aext
+}
+
+define <vscale x 4 x i64> @zload_x2_4i8_4i64(ptr %a, ptr %b) {
+; CHECK-LABEL: zload_x2_4i8_4i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1b { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1b { z2.d }, p0/z, [x1, #1, mul vl]
+; CHECK-NEXT: ld1b { z3.d }, p0/z, [x1]
+; CHECK-NEXT: add z1.d, z1.d, z2.d
+; CHECK-NEXT: add z0.d, z0.d, z3.d
+; CHECK-NEXT: ret
+ %aval = load <vscale x 4 x i8>, ptr %a, align 1
+ %bval = load <vscale x 4 x i8>, ptr %b, align 1
+ %aext = zext <vscale x 4 x i8> %aval to <vscale x 4 x i64>
+ %bext = zext <vscale x 4 x i8> %bval to <vscale x 4 x i64>
+ %res = add <vscale x 4 x i64> %aext, %bext
+ ret <vscale x 4 x i64> %res
+}
+
+define <vscale x 4 x i64> @zload_x2_4i16_4i64(ptr %a, ptr %b) {
+; CHECK-LABEL: zload_x2_4i16_4i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1h { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1h { z2.d }, p0/z, [x1, #1, mul vl]
+; CHECK-NEXT: ld1h { z3.d }, p0/z, [x1]
+; CHECK-NEXT: add z1.d, z1.d, z2.d
+; CHECK-NEXT: add z0.d, z0.d, z3.d
+; CHECK-NEXT: ret
+ %aval = load <vscale x 4 x i16>, ptr %a, align 2
+ %bval = load <vscale x 4 x i16>, ptr %b, align 2
+ %aext = zext <vscale x 4 x i16> %aval to <vscale x 4 x i64>
+ %bext = zext <vscale x 4 x i16> %bval to <vscale x 4 x i64>
+ %res = add <vscale x 4 x i64> %aext, %bext
+ ret <vscale x 4 x i64> %res
+}
+
+define <vscale x 8 x i32> @zload_x2_8i8_8i32(ptr %a, ptr %b) {
+; CHECK-LABEL: zload_x2_8i8_8i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1b { z1.s }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ld1b { z2.s }, p0/z, [x1, #1, mul vl]
+; CHECK-NEXT: ld1b { z3.s }, p0/z, [x1]
+; CHECK-NEXT: add z1.s, z1.s, z2.s
+; CHECK-NEXT: add z0.s, z0.s, z3.s
+; CHECK-NEXT: ret
+ %aval = load <vscale x 8 x i8>, ptr %a, align 1
+ %bval = load <vscale x 8 x i8>, ptr %b, align 1
+ %aext = zext <vscale x 8 x i8> %aval to <vscale x 8 x i32>
+ %bext = zext <vscale x 8 x i8> %bval to <vscale x 8 x i32>
+ %res = add <vscale x 8 x i32> %aext, %bext
+ ret <vscale x 8 x i32> %res
+}
+
+define <vscale x 8 x i64> @zload_x2_8i8_8i64(ptr %a, ptr %b) {
+; CHECK-LABEL: zload_x2_8i8_8i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1b { z3.d }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT: ld1b { z2.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1b { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1b { z4.d }, p0/z, [x1]
+; CHECK-NEXT: ld1b { z5.d }, p0/z, [x1, #3, mul vl]
+; CHECK-NEXT: ld1b { z6.d }, p0/z, [x1, #2, mul vl]
+; CHECK-NEXT: ld1b { z7.d }, p0/z, [x1, #1, mul vl]
+; CHECK-NEXT: add z0.d, z0.d, z4.d
+; CHECK-NEXT: add z3.d, z3.d, z5.d
+; CHECK-NEXT: add z1.d, z1.d, z7.d
+; CHECK-NEXT: add z2.d, z2.d, z6.d
+; CHECK-NEXT: ret
+ %aval = load <vscale x 8 x i8>, ptr %a, align 1
+ %bval = load <vscale x 8 x i8>, ptr %b, align 1
+ %aext = zext <vscale x 8 x i8> %aval to <vscale x 8 x i64>
+ %bext = zext <vscale x 8 x i8> %bval to <vscale x 8 x i64>
+ %res = add <vscale x 8 x i64> %aext, %bext
+ ret <vscale x 8 x i64> %res
+}
+
+define <vscale x 2 x i64> @load_frozen_before_zext(ptr %src) {
+; CHECK-LABEL: load_frozen_before_zext:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %load = load <vscale x 2 x i8>, ptr %src
+ %load.frozen = freeze <vscale x 2 x i8> %load
+ %ext = zext <vscale x 2 x i8> %load.frozen to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %ext
+}
+
+define <vscale x 8 x i32> @load_frozen_before_zext_needs_splitting(ptr %src) {
+; CHECK-LABEL: load_frozen_before_zext_needs_splitting:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ld1b { z1.s }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+ %load = load <vscale x 8 x i8>, ptr %src
+ %load.frozen = freeze <vscale x 8 x i8> %load
+ %ext = zext <vscale x 8 x i8> %load.frozen to <vscale x 8 x i32>
+ ret <vscale x 8 x i32> %ext
+}
+
+; A multi-use freeze in this example effectively means the load is also multi-use.
+define <vscale x 2 x i64> @load_frozen_before_zext_multiuse(ptr %src) {
+; CHECK-LABEL: load_frozen_before_zext_multiuse:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
+; CHECK-NEXT: mov z1.d, z0.d
+; CHECK-NEXT: and z0.d, z0.d, #0xff
+; CHECK-NEXT: // fake_use: $z1
+; CHECK-NEXT: ret
+ %load = load <vscale x 2 x i8>, ptr %src, align 1
+ %load.frozen = freeze <vscale x 2 x i8> %load
+ %ext = zext <vscale x 2 x i8> %load.frozen to <vscale x 2 x i64>
+ call void (...) @llvm.fake.use(<vscale x 2 x i8> %load.frozen)
+ ret <vscale x 2 x i64> %ext
+}
+
+; In this example the freeze is used twice and the load used once.
+define <vscale x 2 x i64> @load_frozen_before_zext_multiuse2(ptr %src) {
+; CHECK-LABEL: load_frozen_before_zext_multiuse2:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
+; CHECK-NEXT: mov z1.d, z0.d
+; CHECK-NEXT: mul z0.d, z0.d, #13
+; CHECK-NEXT: and z1.d, z1.d, #0xff
+; CHECK-NEXT: sxtb z0.d, p0/m, z0.d
+; CHECK-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEXT: ret
+ %load = load <vscale x 2 x i8>, ptr %src, align 1
+ %load.frozen = freeze <vscale x 2 x i8> %load
+ %ext = zext <vscale x 2 x i8> %load.frozen to <vscale x 2 x i64>
+ %mul = mul <vscale x 2 x i8> %load.frozen, splat (i8 13)
+ %mul.ext = sext <vscale x 2 x i8> %mul to <vscale x 2 x i64>
+ %res = add <vscale x 2 x i64> %ext, %mul.ext
+ ret <vscale x 2 x i64> %res
+}
+
+; In this example the freeze is used 3 times and the load used twice via the chain.
+define <vscale x 2 x i64> @load_frozen_before_sext_multiuse3(ptr %src, ptr %dst1, ptr %dst2) {
+; CHECK-LABEL: load_frozen_before_sext_multiuse3:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
+; CHECK-NEXT: movprfx z1, z0
+; CHECK-NEXT: sxtb z1.d, p0/m, z0.d
+; CHECK-NEXT: and z0.d, z0.d, #0xff
+; CHECK-NEXT: mov z2.d, z1.d
+; CHECK-NEXT: mul z1.d, z1.d, #49
+; CHECK-NEXT: mul z2.d, z2.d, #33
+; CHECK-NEXT: add z0.d, z0.d, z1.d
+; CHECK-NEXT: st1w { z2.d }, p0, [x1]
+; CHECK-NEXT: st1h { z1.d }, p0, [x2]
+; CHECK-NEXT: ret
+ %load = load <vscale x 2 x i8>, ptr %src, align 1
+ %load.frozen = freeze <vscale x 2 x i8> %load
+ %zext64 = zext <vscale x 2 x i8> %load.frozen to <vscale x 2 x i64>
+ %sext32 = sext <vscale x 2 x i8> %load.frozen to <vscale x 2 x i32>
+ %sext16 = sext <vscale x 2 x i8> %load.frozen to <vscale x 2 x i16>
+ %mul32 = mul <vscale x 2 x i32> %sext32, splat (i32 33)
+ %mul16 = mul <vscale x 2 x i16> %sext16, splat (i16 49)
+ store <vscale x 2 x i32> %mul32, ptr %dst1, align 4
+ store <vscale x 2 x i16> %mul16, ptr %dst2, align 2
+ %mul32.ext64 = sext <vscale x 2 x i16> %mul16 to <vscale x 2 x i64>
+ %res = add <vscale x 2 x i64> %zext64, %mul32.ext64
+ ret <vscale x 2 x i64> %res
+}
+
+; In this example the freeze is used twice - once for the zero-extend and
+; once for the icmp (or SETCC)
+define <vscale x 2 x i64> @load_frozen_before_zext_multiuse4(ptr %src1, ptr %src2, ptr %dst1) {
+; CHECK-LABEL: load_frozen_before_zext_multiuse4:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov w8, #1234 // =0x4d2
+; CHECK-NEXT: mov z3.d, #27 // =0x1b
+; CHECK-NEXT: mov z1.d, x8
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1b { z2.d }, p0/z, [x1]
+; CHECK-NEXT: and z0.d, z0.d, #0xff
+; CHECK-NEXT: cmpeq p1.d, p0/z, z0.d, #3
+; CHECK-NEXT: add z0.d, z0.d, z1.d
+; CHECK-NEXT: sel z1.d, p1, z2.d, z3.d
+; CHECK-NEXT: st1b { z1.d }, p0, [x2]
+; CHECK-NEXT: ret
+ %load = load <vscale x 2 x i8>, ptr %src1, align 1
+ %load.frozen = freeze <vscale x 2 x i8> %load
+ %zext64 = zext <vscale x 2 x i8> %load.frozen to <vscale x 2 x i64>
+ %cmp = icmp eq <vscale x 2 x i8> %load.frozen, splat (i8 3)
+ %load2 = load <vscale x 2 x i8>, ptr %src2, align 1
+ %sel = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %load2, <vscale x 2 x i8> splat (i8 27)
+ store <vscale x 2 x i8> %sel, ptr %dst1, align 1
+ %res = add <vscale x 2 x i64> %zext64, splat (i64 1234)
+ ret <vscale x 2 x i64> %res
+}
+
+; There is one use of the freeze, and multiple uses of the load via the chain.
+define <vscale x 16 x i64> @load_frozen_before_zext_multiuse5_dst_illegal(ptr %src, ptr %dst) {
+; CHECK-LABEL: load_frozen_before_zext_multiuse5_dst_illegal:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov z24.d, #3 // =0x3
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1b { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z2.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1b { z3.d }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT: ld1b { z4.d }, p0/z, [x0, #4, mul vl]
+; CHECK-NEXT: ld1b { z5.d }, p0/z, [x0, #5, mul vl]
+; CHECK-NEXT: ld1b { z6.d }, p0/z, [x0, #6, mul vl]
+; CHECK-NEXT: ld1b { z7.d }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: str z24, [x1, #6, mul vl]
+; CHECK-NEXT: str z24, [x1, #7, mul vl]
+; CHECK-NEXT: str z24, [x1, #4, mul vl]
+; CHECK-NEXT: str z24, [x1, #5, mul vl]
+; CHECK-NEXT: str z24, [x1, #2, mul vl]
+; CHECK-NEXT: str z24, [x1, #3, mul vl]
+; CHECK-NEXT: str z24, [x1]
+; CHECK-NEXT: str z24, [x1, #1, mul vl]
+; CHECK-NEXT: ret
+ %load = load <vscale x 16 x i8>, ptr %src, align 1
+ %load.frozen = freeze <vscale x 16 x i8> %load
+ %ext = zext <vscale x 16 x i8> %load.frozen to <vscale x 16 x i64>
+ store <vscale x 16 x i64> splat (i64 3), ptr %dst, align 8
+ ret <vscale x 16 x i64> %ext
+}
+
+define <vscale x 2 x i64> @load_frozen_before_zext_multiuse5_src_illegal(ptr %src, ptr %dst) {
+; CHECK-LABEL: load_frozen_before_zext_multiuse5_src_illegal:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov z1.d, #3 // =0x3
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
+; CHECK-NEXT: str z1, [x1]
+; CHECK-NEXT: ret
+ %load = load <vscale x 2 x i8>, ptr %src, align 1
+ %load.frozen = freeze <vscale x 2 x i8> %load
+ %ext = zext <vscale x 2 x i8> %load.frozen to <vscale x 2 x i64>
+ store <vscale x 2 x i64> splat (i64 3), ptr %dst, align 8
+ ret <vscale x 2 x i64> %ext
+}
+
+define <vscale x 4 x i64> @load_frozen_before_zext_multiuse5_both_illegal(ptr %src, ptr %dst) {
+; CHECK-LABEL: load_frozen_before_zext_multiuse5_both_illegal:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov z2.d, #3 // =0x3
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1b { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: str z2, [x1, #1, mul vl]
+; CHECK-NEXT: str z2, [x1]
+; CHECK-NEXT: ret
+ %load = load <vscale x 4 x i8>, ptr %src, align 1
+ %load.frozen = freeze <vscale x 4 x i8> %load
+ %ext = zext <vscale x 4 x i8> %load.frozen to <vscale x 4 x i64>
+ store <vscale x 4 x i64> splat (i64 3), ptr %dst, align 8
+ ret <vscale x 4 x i64> %ext
+}
diff --git a/llvm/test/CodeGen/X86/2007-10-29-ExtendSetCC.ll b/llvm/test/CodeGen/X86/2007-10-29-ExtendSetCC.ll
index 503afa8803a43..212dc0bfb12e9 100644
--- a/llvm/test/CodeGen/X86/2007-10-29-ExtendSetCC.ll
+++ b/llvm/test/CodeGen/X86/2007-10-29-ExtendSetCC.ll
@@ -22,3 +22,34 @@ cond_next245: ; preds = %entry
%tmp256 = and i32 %tmp180181, 15 ; <i32> [#uses=0]
ret i16 0
}
+
+define signext i16 @t_freeze(ptr %p) {
+; CHECK-LABEL: t_freeze:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: movswl (%ecx), %eax
+; CHECK-NEXT: testl %eax, %eax
+; CHECK-NEXT: js .LBB1_1
+; CHECK-NEXT: # %bb.2: # %cond_next
+; CHECK-NEXT: andl $15, %eax
+; CHECK-NEXT: movl %eax, (%ecx)
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retl
+; CHECK-NEXT: .LBB1_1: # %cond_true
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
+; CHECK-NEXT: retl
+entry:
+ %ld = load i16, ptr %p, align 2
+ %ld.fr = freeze i16 %ld
+ %sext = sext i16 %ld.fr to i32
+ %cmp = icmp slt i16 %ld.fr, 0
+ br i1 %cmp, label %cond_true, label %cond_next
+
+cond_true:
+ ret i16 %ld.fr
+
+cond_next:
+ %tmp2 = and i32 %sext, 15
+ store i32 %tmp2, ptr %p, align 4
+ ret i16 0
+}
diff --git a/llvm/test/CodeGen/X86/avx512-ext.ll b/llvm/test/CodeGen/X86/avx512-ext.ll
index 2617e2d12adfd..43ca488351986 100644
--- a/llvm/test/CodeGen/X86/avx512-ext.ll
+++ b/llvm/test/CodeGen/X86/avx512-ext.ll
@@ -6,8 +6,7 @@
define <8 x i16> @zext_8x8mem_to_8x16(ptr%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_8x8mem_to_8x16:
; KNL: # %bb.0:
-; KNL-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; KNL-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; KNL-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; KNL-NEXT: vpsllw $15, %xmm0, %xmm0
; KNL-NEXT: vpsraw $15, %xmm0, %xmm0
; KNL-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -22,8 +21,7 @@ define <8 x i16> @zext_8x8mem_to_8x16(ptr%i , <8 x i1> %mask) nounwind readnone
;
; AVX512DQNOBW-LABEL: zext_8x8mem_to_8x16:
; AVX512DQNOBW: # %bb.0:
-; AVX512DQNOBW-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX512DQNOBW-NEXT: vpsllw $15, %xmm0, %xmm0
; AVX512DQNOBW-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX512DQNOBW-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -37,8 +35,7 @@ define <8 x i16> @zext_8x8mem_to_8x16(ptr%i , <8 x i1> %mask) nounwind readnone
define <8 x i16> @sext_8x8mem_to_8x16(ptr%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_8x8mem_to_8x16:
; KNL: # %bb.0:
-; KNL-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; KNL-NEXT: vpmovsxbw %xmm1, %xmm1
+; KNL-NEXT: vpmovsxbw (%rdi), %xmm1
; KNL-NEXT: vpsllw $15, %xmm0, %xmm0
; KNL-NEXT: vpsraw $15, %xmm0, %xmm0
; KNL-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -53,8 +50,7 @@ define <8 x i16> @sext_8x8mem_to_8x16(ptr%i , <8 x i1> %mask) nounwind readnone
;
; AVX512DQNOBW-LABEL: sext_8x8mem_to_8x16:
; AVX512DQNOBW: # %bb.0:
-; AVX512DQNOBW-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX512DQNOBW-NEXT: vpmovsxbw %xmm1, %xmm1
+; AVX512DQNOBW-NEXT: vpmovsxbw (%rdi), %xmm1
; AVX512DQNOBW-NEXT: vpsllw $15, %xmm0, %xmm0
; AVX512DQNOBW-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX512DQNOBW-NEXT: vpand %xmm1, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/iabs.ll b/llvm/test/CodeGen/X86/iabs.ll
index bdceeefbcfaba..3c3171032451a 100644
--- a/llvm/test/CodeGen/X86/iabs.ll
+++ b/llvm/test/CodeGen/X86/iabs.ll
@@ -37,8 +37,8 @@ define i8 @test_i8(i8 %a) nounwind {
define i16 @test_i16(i16 %a) nounwind {
; X86-NO-CMOV-LABEL: test_i16:
; X86-NO-CMOV: # %bb.0:
-; X86-NO-CMOV-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NO-CMOV-NEXT: movswl %ax, %ecx
+; X86-NO-CMOV-NEXT: movswl {{[0-9]+}}(%esp), %eax
+; X86-NO-CMOV-NEXT: movl %eax, %ecx
; X86-NO-CMOV-NEXT: sarl $15, %ecx
; X86-NO-CMOV-NEXT: xorl %ecx, %eax
; X86-NO-CMOV-NEXT: subl %ecx, %eax
diff --git a/llvm/test/CodeGen/X86/icmp-abs-C.ll b/llvm/test/CodeGen/X86/icmp-abs-C.ll
index c98889b7d5cb3..71893a9e4be67 100644
--- a/llvm/test/CodeGen/X86/icmp-abs-C.ll
+++ b/llvm/test/CodeGen/X86/icmp-abs-C.ll
@@ -161,8 +161,8 @@ define i16 @ne_and_with_dom_abs(i16 %x) nounwind {
; X86-LABEL: ne_and_with_dom_abs:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movswl %ax, %ecx
+; X86-NEXT: movswl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
; X86-NEXT: sarl $15, %ecx
; X86-NEXT: xorl %ecx, %eax
; X86-NEXT: subl %ecx, %eax
diff --git a/llvm/test/CodeGen/X86/icmp-pow2-logic-npow2.ll b/llvm/test/CodeGen/X86/icmp-pow2-logic-npow2.ll
index 02078c3575dd6..b14fedeaae57d 100644
--- a/llvm/test/CodeGen/X86/icmp-pow2-logic-npow2.ll
+++ b/llvm/test/CodeGen/X86/icmp-pow2-logic-npow2.ll
@@ -196,8 +196,8 @@ define i1 @abs_ne_pow2(i64 %0) nounwind {
define i1 @abs_ne_nonpow2(i16 %0) nounwind {
; X86-LABEL: abs_ne_nonpow2:
; X86: # %bb.0:
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movswl %ax, %ecx
+; X86-NEXT: movswl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
; X86-NEXT: sarl $15, %ecx
; X86-NEXT: xorl %ecx, %eax
; X86-NEXT: subl %ecx, %eax
diff --git a/llvm/test/CodeGen/X86/known-bits.ll b/llvm/test/CodeGen/X86/known-bits.ll
index 58a0595e4322a..dbcc8eceeb037 100644
--- a/llvm/test/CodeGen/X86/known-bits.ll
+++ b/llvm/test/CodeGen/X86/known-bits.ll
@@ -7,7 +7,8 @@ define void @knownbits_zext_in_reg(ptr) nounwind {
; X86: # %bb.0: # %BB
; X86-NEXT: pushl %ebx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movzbl (%eax), %ecx
+; X86-NEXT: movzbl (%eax), %eax
+; X86-NEXT: movzwl %ax, %ecx
; X86-NEXT: imull $101, %ecx, %eax
; X86-NEXT: shrl $14, %eax
; X86-NEXT: imull $177, %ecx, %edx
@@ -31,6 +32,7 @@ define void @knownbits_zext_in_reg(ptr) nounwind {
; X64-LABEL: knownbits_zext_in_reg:
; X64: # %bb.0: # %BB
; X64-NEXT: movzbl (%rdi), %eax
+; X64-NEXT: movzwl %ax, %eax
; X64-NEXT: imull $101, %eax, %ecx
; X64-NEXT: shrl $14, %ecx
; X64-NEXT: imull $177, %eax, %edx
diff --git a/llvm/test/CodeGen/X86/neg-abs.ll b/llvm/test/CodeGen/X86/neg-abs.ll
index 724b2dc4c431a..050610426e6bd 100644
--- a/llvm/test/CodeGen/X86/neg-abs.ll
+++ b/llvm/test/CodeGen/X86/neg-abs.ll
@@ -34,8 +34,8 @@ define i8 @neg_abs_i8(i8 %x) nounwind {
define i16 @neg_abs_i16(i16 %x) nounwind {
; X86-LABEL: neg_abs_i16:
; X86: # %bb.0:
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movswl %cx, %eax
+; X86-NEXT: movswl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, %eax
; X86-NEXT: sarl $15, %eax
; X86-NEXT: xorl %eax, %ecx
; X86-NEXT: subl %ecx, %eax
@@ -182,8 +182,8 @@ define i8 @sub_abs_i8(i8 %x, i8 %y) nounwind {
define i16 @sub_abs_i16(i16 %x, i16 %y) nounwind {
; X86-LABEL: sub_abs_i16:
; X86: # %bb.0:
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movswl %cx, %eax
+; X86-NEXT: movswl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, %eax
; X86-NEXT: sarl $15, %eax
; X86-NEXT: xorl %eax, %ecx
; X86-NEXT: subl %ecx, %eax
diff --git a/llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll b/llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll
index 6b5c6049f025b..fde915247760a 100644
--- a/llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll
+++ b/llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll
@@ -171,8 +171,8 @@ define void @load_2byte_chunk_of_4byte_alloca_with_zero_upper_half(ptr %src, i64
define void @load_1byte_chunk_of_8byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
; X64-NO-BMI2-LABEL: load_1byte_chunk_of_8byte_alloca_with_zero_upper_half:
; X64-NO-BMI2: # %bb.0:
-; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
; X64-NO-BMI2-NEXT: movl (%rdi), %eax
+; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NO-BMI2-NEXT: shrq %cl, %rax
; X64-NO-BMI2-NEXT: movb %al, (%rdx)
@@ -180,8 +180,8 @@ define void @load_1byte_chunk_of_8byte_alloca_with_zero_upper_half(ptr %src, i64
;
; X64-BMI2-LABEL: load_1byte_chunk_of_8byte_alloca_with_zero_upper_half:
; X64-BMI2: # %bb.0:
-; X64-BMI2-NEXT: shll $3, %esi
; X64-BMI2-NEXT: movl (%rdi), %eax
+; X64-BMI2-NEXT: shll $3, %esi
; X64-BMI2-NEXT: shrxq %rsi, %rax, %rax
; X64-BMI2-NEXT: movb %al, (%rdx)
; X64-BMI2-NEXT: retq
@@ -248,8 +248,8 @@ define void @load_1byte_chunk_of_8byte_alloca_with_zero_upper_half(ptr %src, i64
define void @load_2byte_chunk_of_8byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
; X64-NO-BMI2-LABEL: load_2byte_chunk_of_8byte_alloca_with_zero_upper_half:
; X64-NO-BMI2: # %bb.0:
-; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
; X64-NO-BMI2-NEXT: movl (%rdi), %eax
+; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NO-BMI2-NEXT: shrq %cl, %rax
; X64-NO-BMI2-NEXT: movw %ax, (%rdx)
@@ -257,8 +257,8 @@ define void @load_2byte_chunk_of_8byte_alloca_with_zero_upper_half(ptr %src, i64
;
; X64-BMI2-LABEL: load_2byte_chunk_of_8byte_alloca_with_zero_upper_half:
; X64-BMI2: # %bb.0:
-; X64-BMI2-NEXT: shll $3, %esi
; X64-BMI2-NEXT: movl (%rdi), %eax
+; X64-BMI2-NEXT: shll $3, %esi
; X64-BMI2-NEXT: shrxq %rsi, %rax, %rax
; X64-BMI2-NEXT: movw %ax, (%rdx)
; X64-BMI2-NEXT: retq
@@ -324,8 +324,8 @@ define void @load_2byte_chunk_of_8byte_alloca_with_zero_upper_half(ptr %src, i64
define void @load_4byte_chunk_of_8byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
; X64-NO-BMI2-LABEL: load_4byte_chunk_of_8byte_alloca_with_zero_upper_half:
; X64-NO-BMI2: # %bb.0:
-; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
; X64-NO-BMI2-NEXT: movl (%rdi), %eax
+; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NO-BMI2-NEXT: shrq %cl, %rax
; X64-NO-BMI2-NEXT: movl %eax, (%rdx)
@@ -333,8 +333,8 @@ define void @load_4byte_chunk_of_8byte_alloca_with_zero_upper_half(ptr %src, i64
;
; X64-BMI2-LABEL: load_4byte_chunk_of_8byte_alloca_with_zero_upper_half:
; X64-BMI2: # %bb.0:
-; X64-BMI2-NEXT: shll $3, %esi
; X64-BMI2-NEXT: movl (%rdi), %eax
+; X64-BMI2-NEXT: shll $3, %esi
; X64-BMI2-NEXT: shrxq %rsi, %rax, %rax
; X64-BMI2-NEXT: movl %eax, (%rdx)
; X64-BMI2-NEXT: retq
diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/basic.ll.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/basic.ll.expected
index 669ad526f9f34..33dce3ca920a3 100644
--- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/basic.ll.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/basic.ll.expected
@@ -31,8 +31,8 @@ define i8 @test_i8(i8 %a) nounwind {
define i16 @test_i16(i16 %a) nounwind {
; X86-NO-CMOV-LABEL: test_i16:
; X86-NO-CMOV: # %bb.0:
-; X86-NO-CMOV-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NO-CMOV-NEXT: movswl %ax, %ecx
+; X86-NO-CMOV-NEXT: movswl {{[0-9]+}}(%esp), %eax
+; X86-NO-CMOV-NEXT: movl %eax, %ecx
; X86-NO-CMOV-NEXT: sarl $15, %ecx
; X86-NO-CMOV-NEXT: xorl %ecx, %eax
; X86-NO-CMOV-NEXT: subl %ecx, %eax