[llvm] 346f6b5 - [ARM][MVE] Enable masked gathers from vector of pointers

Anna Welker via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 8 05:43:26 PST 2020


Author: Anna Welker
Date: 2020-01-08T13:43:12Z
New Revision: 346f6b54bd1237a9a5a2d9bb1e424b57dc178998

URL: https://github.com/llvm/llvm-project/commit/346f6b54bd1237a9a5a2d9bb1e424b57dc178998
DIFF: https://github.com/llvm/llvm-project/commit/346f6b54bd1237a9a5a2d9bb1e424b57dc178998.diff

LOG: [ARM][MVE] Enable masked gathers from vector of pointers

Adds a pass to the ARM backend that takes v4i32
gathers and transforms them into calls to MVE's
masked gather intrinsics.

Differential Revision: https://reviews.llvm.org/D71743
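
For illustration, a minimal sketch of the transform on an all-true-mask
v4i32 gather, in the style of the tests below (the intrinsic name and type
mangling are inferred from the lowering code in this patch, so treat the
exact spelling as approximate):

  ; before: generic masked gather from a vector of pointers
  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)

  ; after: MVE gather with a vector base and zero offset, which the backend
  ; can select to a single vldrw.u32 from a vector of addresses
  %gather = call <4 x i32> @llvm.arm.mve.vldr.gather.base.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 0)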

Added: 
    llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp
    llvm/test/CodeGen/Thumb2/mve-gather-ind32-scaled.ll
    llvm/test/CodeGen/Thumb2/mve-gather-ind32-unscaled.ll
    llvm/test/CodeGen/Thumb2/mve-gather-ptrs.ll
    llvm/test/CodeGen/Thumb2/mve-gather-scatter-opt.ll

Modified: 
    llvm/lib/Target/ARM/ARM.h
    llvm/lib/Target/ARM/ARMTargetMachine.cpp
    llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
    llvm/lib/Target/ARM/ARMTargetTransformInfo.h
    llvm/lib/Target/ARM/CMakeLists.txt
    llvm/test/CodeGen/ARM/O3-pipeline.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/ARM/ARM.h b/llvm/lib/Target/ARM/ARM.h
index 9076c191d839..3412813a3ef2 100644
--- a/llvm/lib/Target/ARM/ARM.h
+++ b/llvm/lib/Target/ARM/ARM.h
@@ -53,6 +53,7 @@ FunctionPass *createThumb2SizeReductionPass(
 InstructionSelector *
 createARMInstructionSelector(const ARMBaseTargetMachine &TM, const ARMSubtarget &STI,
                              const ARMRegisterBankInfo &RBI);
+Pass *createMVEGatherScatterLoweringPass();
 
 void LowerARMMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
                                   ARMAsmPrinter &AP);
@@ -67,6 +68,7 @@ void initializeThumb2ITBlockPass(PassRegistry &);
 void initializeMVEVPTBlockPass(PassRegistry &);
 void initializeARMLowOverheadLoopsPass(PassRegistry &);
 void initializeMVETailPredicationPass(PassRegistry &);
+void initializeMVEGatherScatterLoweringPass(PassRegistry &);
 
 } // end namespace llvm
 

diff --git a/llvm/lib/Target/ARM/ARMTargetMachine.cpp b/llvm/lib/Target/ARM/ARMTargetMachine.cpp
index 018ce3903c2d..a48f351f37ad 100644
--- a/llvm/lib/Target/ARM/ARMTargetMachine.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetMachine.cpp
@@ -98,6 +98,7 @@ extern "C" void LLVMInitializeARMTarget() {
   initializeMVEVPTBlockPass(Registry);
   initializeMVETailPredicationPass(Registry);
   initializeARMLowOverheadLoopsPass(Registry);
+  initializeMVEGatherScatterLoweringPass(Registry);
 }
 
 static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
@@ -404,6 +405,8 @@ void ARMPassConfig::addIRPasses() {
           return ST.hasAnyDataBarrier() && !ST.isThumb1Only();
         }));
 
+  addPass(createMVEGatherScatterLoweringPass());
+
   TargetPassConfig::addIRPasses();
 
   // Run the parallel DSP pass.

diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
index e4b77ae56a4f..41ad8b0c04de 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -22,6 +22,7 @@
 #include "llvm/IR/Instruction.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/PatternMatch.h"
 #include "llvm/IR/Type.h"
 #include "llvm/MC/SubtargetFeature.h"
 #include "llvm/Support/Casting.h"
@@ -46,6 +47,8 @@ static cl::opt<bool> DisableLowOverheadLoops(
 
 extern cl::opt<bool> DisableTailPredication;
 
+extern cl::opt<bool> EnableMaskedGatherScatters;
+
 bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
                                      const Function *Callee) const {
   const TargetMachine &TM = getTLI()->getTargetMachine();
@@ -514,6 +517,27 @@ bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, MaybeAlign Alignment) {
          (EltWidth == 8);
 }
 
+bool ARMTTIImpl::isLegalMaskedGather(Type *Ty, MaybeAlign Alignment) {
+  if (!EnableMaskedGatherScatters || !ST->hasMVEIntegerOps())
+    return false;
+
+  // This method is called in 2 places:
+  //  - from the vectorizer with a scalar type, in which case we need to get
+  //  this as good as we can with the limited info we have (and rely on the cost
+  //  model for the rest).
+  //  - from the masked intrinsic lowering pass with the actual vector type.
+  // For MVE, we have a custom lowering pass that will already have custom
+  // legalised any gathers that we can to MVE intrinsics, and want to expand all
+  // the rest. The pass runs before the masked intrinsic lowering pass, so if we
+  // are here, we know we want to expand.
+  if (isa<VectorType>(Ty))
+    return false;
+
+  unsigned EltWidth = Ty->getScalarSizeInBits();
+  return ((EltWidth == 32 && (!Alignment || Alignment >= 4)) ||
+          (EltWidth == 16 && (!Alignment || Alignment >= 2)) || EltWidth == 8);
+}
+
 int ARMTTIImpl::getMemcpyCost(const Instruction *I) {
   const MemCpyInst *MI = dyn_cast<MemCpyInst>(I);
   assert(MI && "MemcpyInst expected");

diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
index 6888c8924fc6..880588adfdfd 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
@@ -159,7 +159,7 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
     return isLegalMaskedLoad(DataTy, Alignment);
   }
 
-  bool isLegalMaskedGather(Type *Ty, MaybeAlign Alignment) { return false; }
+  bool isLegalMaskedGather(Type *Ty, MaybeAlign Alignment);
 
   bool isLegalMaskedScatter(Type *Ty, MaybeAlign Alignment) { return false; }
 

diff --git a/llvm/lib/Target/ARM/CMakeLists.txt b/llvm/lib/Target/ARM/CMakeLists.txt
index b94a78ea9404..7591701857b9 100644
--- a/llvm/lib/Target/ARM/CMakeLists.txt
+++ b/llvm/lib/Target/ARM/CMakeLists.txt
@@ -51,6 +51,7 @@ add_llvm_target(ARMCodeGen
   ARMTargetObjectFile.cpp
   ARMTargetTransformInfo.cpp
   MLxExpansionPass.cpp
+  MVEGatherScatterLowering.cpp
   MVETailPredication.cpp
   MVEVPTBlockPass.cpp
   Thumb1FrameLowering.cpp

diff --git a/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp b/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp
new file mode 100644
index 000000000000..4657a043dba1
--- /dev/null
+++ b/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp
@@ -0,0 +1,177 @@
+//===- MVEGatherScatterLowering.cpp - Gather/Scatter lowering -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// This pass custom lowers llvm.masked.gather and llvm.masked.scatter
+/// intrinsic calls to arm.mve.gather and arm.mve.scatter intrinsics,
+/// optimising the code to produce a better final result as we go.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ARM.h"
+#include "ARMBaseInstrInfo.h"
+#include "ARMSubtarget.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicsARM.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
+#include <algorithm>
+#include <cassert>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "mve-gather-scatter-lowering"
+
+cl::opt<bool> EnableMaskedGatherScatters(
+    "enable-arm-maskedgatscat", cl::Hidden, cl::init(false),
+    cl::desc("Enable the generation of masked gathers and scatters"));
+
+namespace {
+
+class MVEGatherScatterLowering : public FunctionPass {
+public:
+  static char ID; // Pass identification, replacement for typeid
+
+  explicit MVEGatherScatterLowering() : FunctionPass(ID) {
+    initializeMVEGatherScatterLoweringPass(*PassRegistry::getPassRegistry());
+  }
+
+  bool runOnFunction(Function &F) override;
+
+  StringRef getPassName() const override {
+    return "MVE gather/scatter lowering";
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesCFG();
+    AU.addRequired<TargetPassConfig>();
+    FunctionPass::getAnalysisUsage(AU);
+  }
+};
+
+} // end anonymous namespace
+
+char MVEGatherScatterLowering::ID = 0;
+
+INITIALIZE_PASS(MVEGatherScatterLowering, DEBUG_TYPE,
+                "MVE gather/scattering lowering pass", false, false)
+
+Pass *llvm::createMVEGatherScatterLoweringPass() {
+  return new MVEGatherScatterLowering();
+}
+
+static bool isLegalTypeAndAlignment(unsigned NumElements, unsigned ElemSize,
+                                    unsigned Alignment) {
+  // Only allow non-extending v4i32 gathers for now
+  return NumElements == 4 && ElemSize == 32 && Alignment >= 4;
+}
+
+static bool LowerGather(IntrinsicInst *I) {
+  using namespace PatternMatch;
+  LLVM_DEBUG(dbgs() << "masked gathers: checking transform preconditions\n");
+
+  // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
+  // Attempt to turn the masked gather in I into an MVE intrinsic,
+  // potentially optimising the addressing modes as we do so.
+  Type *Ty = I->getType();
+  Value *Ptr = I->getArgOperand(0);
+  unsigned Alignment = cast<ConstantInt>(I->getArgOperand(1))->getZExtValue();
+  Value *Mask = I->getArgOperand(2);
+  Value *PassThru = I->getArgOperand(3);
+
+  // Check this is a valid gather with correct alignment
+  if (!isLegalTypeAndAlignment(Ty->getVectorNumElements(),
+                               Ty->getScalarSizeInBits(), Alignment)) {
+    LLVM_DEBUG(dbgs() << "masked gathers: instruction does not have valid "
+                      << "alignment or vector type \n");
+    return false;
+  }
+
+  IRBuilder<> Builder(I->getContext());
+  Builder.SetInsertPoint(I);
+  Builder.SetCurrentDebugLocation(I->getDebugLoc());
+
+  Value *Load = nullptr;
+  // Look through bitcast instruction if #elements is the same
+  if (auto *BitCast = dyn_cast<BitCastInst>(Ptr)) {
+    Type *BCTy = BitCast->getType();
+    Type *BCSrcTy = BitCast->getOperand(0)->getType();
+    if (BCTy->getVectorNumElements() == BCSrcTy->getVectorNumElements()) {
+      LLVM_DEBUG(dbgs() << "masked gathers: looking through bitcast\n");
+      Ptr = BitCast->getOperand(0);
+    }
+  }
+  assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
+
+  if (Ty->getVectorNumElements() != 4)
+    // Can't build an intrinsic for this
+    return false;
+  if (match(Mask, m_One()))
+    Load = Builder.CreateIntrinsic(Intrinsic::arm_mve_vldr_gather_base,
+                                   {Ty, Ptr->getType()},
+                                   {Ptr, Builder.getInt32(0)});
+  else
+    Load =
+        Builder.CreateIntrinsic(Intrinsic::arm_mve_vldr_gather_base_predicated,
+                                {Ty, Ptr->getType(), Mask->getType()},
+                                {Ptr, Builder.getInt32(0), Mask});
+
+  if (!isa<UndefValue>(PassThru) && !match(PassThru, m_Zero())) {
+    LLVM_DEBUG(dbgs() << "masked gathers: found non-trivial passthru - "
+                      << "creating select\n");
+    Load = Builder.CreateSelect(Mask, Load, PassThru);
+  }
+
+  LLVM_DEBUG(dbgs() << "masked gathers: successfully built masked gather\n");
+  I->replaceAllUsesWith(Load);
+  I->eraseFromParent();
+  return true;
+}
+
+bool MVEGatherScatterLowering::runOnFunction(Function &F) {
+  if (!EnableMaskedGatherScatters)
+    return false;
+  auto &TPC = getAnalysis<TargetPassConfig>();
+  auto &TM = TPC.getTM<TargetMachine>();
+  auto *ST = &TM.getSubtarget<ARMSubtarget>(F);
+  if (!ST->hasMVEIntegerOps())
+    return false;
+  SmallVector<IntrinsicInst *, 4> Gathers;
+  for (BasicBlock &BB : F) {
+    for (Instruction &I : BB) {
+      IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
+      if (II && II->getIntrinsicID() == Intrinsic::masked_gather)
+        Gathers.push_back(II);
+    }
+  }
+
+  if (Gathers.empty())
+    return false;
+
+  for (IntrinsicInst *I : Gathers)
+    LowerGather(I);
+
+  return true;
+}
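
A note on the predicated path in LowerGather above: with a mask that is not
all-ones and a non-trivial passthru, the pass emits the predicated MVE
intrinsic and then re-selects the passthru lanes. A hedged sketch in IR
(mangling assumed from the CreateIntrinsic overload types, so approximate):

  ; before
  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> %mask, <4 x i32> %passthru)

  ; after, roughly
  %load = call <4 x i32> @llvm.arm.mve.vldr.gather.base.predicated.v4i32.v4p0i32.v4i1(<4 x i32*> %ptrs, i32 0, <4 x i1> %mask)
  %gather = select <4 x i1> %mask, <4 x i32> %load, <4 x i32> %passthru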

diff --git a/llvm/test/CodeGen/ARM/O3-pipeline.ll b/llvm/test/CodeGen/ARM/O3-pipeline.ll
index 3e9431403af3..42bad7c74875 100644
--- a/llvm/test/CodeGen/ARM/O3-pipeline.ll
+++ b/llvm/test/CodeGen/ARM/O3-pipeline.ll
@@ -7,6 +7,7 @@
 ; CHECK-NEXT:    FunctionPass Manager
 ; CHECK-NEXT:      Expand Atomic instructions
 ; CHECK-NEXT:      Simplify the CFG
+; CHECK-NEXT:      MVE gather/scatter lowering
 ; CHECK-NEXT:      Dominator Tree Construction
 ; CHECK-NEXT:      Basic Alias Analysis (stateless AA impl)
 ; CHECK-NEXT:      Module Verifier

diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-ind32-scaled.ll b/llvm/test/CodeGen/Thumb2/mve-gather-ind32-scaled.ll
new file mode 100644
index 000000000000..f75379160d78
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-ind32-scaled.ll
@@ -0,0 +1,461 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve.fp -enable-arm-maskedgatscat %s -o - 2>/dev/null | FileCheck %s
+
+define arm_aapcs_vfpcc <4 x i32> @zext_scaled_i16_i32(i16* %base, <4 x i32>* %offptr) {
+; CHECK-LABEL: zext_scaled_i16_i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    vshl.i32 q0, q0, #1
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    ldrh r2, [r2]
+; CHECK-NEXT:    ldrh r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrh r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    ldrh r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmovlb.u16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
+  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %gather.zext = zext <4 x i16> %gather to <4 x i32>
+  ret <4 x i32> %gather.zext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sext_scaled_i16_i32(i16* %base, <4 x i32>* %offptr) {
+; CHECK-LABEL: sext_scaled_i16_i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    vshl.i32 q0, q0, #1
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    ldrh r2, [r2]
+; CHECK-NEXT:    ldrh r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrh r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    ldrh r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
+  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %gather.sext = sext <4 x i16> %gather to <4 x i32>
+  ret <4 x i32> %gather.sext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @scaled_i32_i32(i32* %base, <4 x i32>* %offptr) {
+; CHECK-LABEL: scaled_i32_i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    vshl.i32 q0, q0, #2
+; CHECK-NEXT:    vadd.i32 q1, q0, r0
+; CHECK-NEXT:    vldrw.u32 q0, [q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
+  %ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  ret <4 x i32> %gather
+}
+
+; TODO: scaled_f16_i32
+
+define arm_aapcs_vfpcc <4 x float> @scaled_f32_i32(i32* %base, <4 x i32>* %offptr) {
+; CHECK-LABEL: scaled_f32_i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    vshl.i32 q0, q0, #2
+; CHECK-NEXT:    vadd.i32 q1, q0, r0
+; CHECK-NEXT:    vldrw.u32 q0, [q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
+  %i32_ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs
+  %ptrs = bitcast <4 x i32*> %i32_ptrs to <4 x float*>
+  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
+  ret <4 x float> %gather
+}
+
+define arm_aapcs_vfpcc <4 x i32> @unsigned_scaled_b_i32_i16(i32* %base, <4 x i16>* %offptr) {
+; CHECK-LABEL: unsigned_scaled_b_i32_i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrh.u32 q0, [r1]
+; CHECK-NEXT:    vshl.i32 q0, q0, #2
+; CHECK-NEXT:    vadd.i32 q1, q0, r0
+; CHECK-NEXT:    vldrw.u32 q0, [q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs.zext = zext <4 x i16> %offs to <4 x i32>
+  %ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs.zext
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  ret <4 x i32> %gather
+}
+
+define arm_aapcs_vfpcc <4 x i32> @signed_scaled_i32_i16(i32* %base, <4 x i16>* %offptr) {
+; CHECK-LABEL: signed_scaled_i32_i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrh.s32 q0, [r1]
+; CHECK-NEXT:    vshl.i32 q0, q0, #2
+; CHECK-NEXT:    vadd.i32 q1, q0, r0
+; CHECK-NEXT:    vldrw.u32 q0, [q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs.sext = sext <4 x i16> %offs to <4 x i32>
+  %ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs.sext
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  ret <4 x i32> %gather
+}
+
+define arm_aapcs_vfpcc <4 x float> @a_unsigned_scaled_f32_i16(i32* %base, <4 x i16>* %offptr) {
+; CHECK-LABEL: a_unsigned_scaled_f32_i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrh.u32 q0, [r1]
+; CHECK-NEXT:    vshl.i32 q0, q0, #2
+; CHECK-NEXT:    vadd.i32 q1, q0, r0
+; CHECK-NEXT:    vldrw.u32 q0, [q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs.zext = zext <4 x i16> %offs to <4 x i32>
+  %i32_ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x i32*> %i32_ptrs to <4 x float*>
+  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
+  ret <4 x float> %gather
+}
+
+define arm_aapcs_vfpcc <4 x float> @b_signed_scaled_f32_i16(i32* %base, <4 x i16>* %offptr) {
+; CHECK-LABEL: b_signed_scaled_f32_i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrh.s32 q0, [r1]
+; CHECK-NEXT:    vshl.i32 q0, q0, #2
+; CHECK-NEXT:    vadd.i32 q1, q0, r0
+; CHECK-NEXT:    vldrw.u32 q0, [q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs.sext = sext <4 x i16> %offs to <4 x i32>
+  %i32_ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x i32*> %i32_ptrs to <4 x float*>
+  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
+  ret <4 x float> %gather
+}
+
+define arm_aapcs_vfpcc <4 x i32> @zext_signed_scaled_i16_i16(i16* %base, <4 x i16>* %offptr) {
+; CHECK-LABEL: zext_signed_scaled_i16_i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrh.s32 q0, [r1]
+; CHECK-NEXT:    vshl.i32 q0, q0, #1
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    ldrh r2, [r2]
+; CHECK-NEXT:    ldrh r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrh r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    ldrh r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmovlb.u16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs.sext = sext <4 x i16> %offs to <4 x i32>
+  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs.sext
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %gather.zext = zext <4 x i16> %gather to <4 x i32>
+  ret <4 x i32> %gather.zext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sext_signed_scaled_i16_i16(i16* %base, <4 x i16>* %offptr) {
+; CHECK-LABEL: sext_signed_scaled_i16_i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrh.s32 q0, [r1]
+; CHECK-NEXT:    vshl.i32 q0, q0, #1
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    ldrh r2, [r2]
+; CHECK-NEXT:    ldrh r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrh r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    ldrh r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs.sext = sext <4 x i16> %offs to <4 x i32>
+  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs.sext
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %gather.sext = sext <4 x i16> %gather to <4 x i32>
+  ret <4 x i32> %gather.sext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @zext_unsigned_scaled_i16_i16(i16* %base, <4 x i16>* %offptr) {
+; CHECK-LABEL: zext_unsigned_scaled_i16_i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrh.u32 q0, [r1]
+; CHECK-NEXT:    vshl.i32 q0, q0, #1
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    ldrh r2, [r2]
+; CHECK-NEXT:    ldrh r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrh r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    ldrh r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmovlb.u16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs.zext = zext <4 x i16> %offs to <4 x i32>
+  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs.zext
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %gather.zext = zext <4 x i16> %gather to <4 x i32>
+  ret <4 x i32> %gather.zext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sext_unsigned_scaled_i16_i16(i16* %base, <4 x i16>* %offptr) {
+; CHECK-LABEL: sext_unsigned_scaled_i16_i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrh.u32 q0, [r1]
+; CHECK-NEXT:    vshl.i32 q0, q0, #1
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    ldrh r2, [r2]
+; CHECK-NEXT:    ldrh r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrh r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    ldrh r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs.zext = zext <4 x i16> %offs to <4 x i32>
+  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs.zext
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %gather.sext = sext <4 x i16> %gather to <4 x i32>
+  ret <4 x i32> %gather.sext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @unsigned_scaled_b_i32_i8(i32* %base, <4 x i8>* %offptr) {
+; CHECK-LABEL: unsigned_scaled_b_i32_i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.u32 q0, [r1]
+; CHECK-NEXT:    vshl.i32 q0, q0, #2
+; CHECK-NEXT:    vadd.i32 q1, q0, r0
+; CHECK-NEXT:    vldrw.u32 q0, [q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs.zext = zext <4 x i8> %offs to <4 x i32>
+  %ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs.zext
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  ret <4 x i32> %gather
+}
+
+define arm_aapcs_vfpcc <4 x i32> @signed_scaled_i32_i8(i32* %base, <4 x i8>* %offptr) {
+; CHECK-LABEL: signed_scaled_i32_i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.s32 q0, [r1]
+; CHECK-NEXT:    vshl.i32 q0, q0, #2
+; CHECK-NEXT:    vadd.i32 q1, q0, r0
+; CHECK-NEXT:    vldrw.u32 q0, [q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs.sext = sext <4 x i8> %offs to <4 x i32>
+  %ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs.sext
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  ret <4 x i32> %gather
+}
+
+define arm_aapcs_vfpcc <4 x float> @a_unsigned_scaled_f32_i8(i32* %base, <4 x i8>* %offptr) {
+; CHECK-LABEL: a_unsigned_scaled_f32_i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.u32 q0, [r1]
+; CHECK-NEXT:    vshl.i32 q0, q0, #2
+; CHECK-NEXT:    vadd.i32 q1, q0, r0
+; CHECK-NEXT:    vldrw.u32 q0, [q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs.zext = zext <4 x i8> %offs to <4 x i32>
+  %i32_ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x i32*> %i32_ptrs to <4 x float*>
+  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
+  ret <4 x float> %gather
+}
+
+define arm_aapcs_vfpcc <4 x float> @b_signed_scaled_f32_i8(i32* %base, <4 x i8>* %offptr) {
+; CHECK-LABEL: b_signed_scaled_f32_i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.s32 q0, [r1]
+; CHECK-NEXT:    vshl.i32 q0, q0, #2
+; CHECK-NEXT:    vadd.i32 q1, q0, r0
+; CHECK-NEXT:    vldrw.u32 q0, [q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs.sext = sext <4 x i8> %offs to <4 x i32>
+  %i32_ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x i32*> %i32_ptrs to <4 x float*>
+  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
+  ret <4 x float> %gather
+}
+
+define arm_aapcs_vfpcc <4 x i32> @zext_signed_scaled_i16_i8(i16* %base, <4 x i8>* %offptr) {
+; CHECK-LABEL: zext_signed_scaled_i16_i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.s32 q0, [r1]
+; CHECK-NEXT:    vshl.i32 q0, q0, #1
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    ldrh r2, [r2]
+; CHECK-NEXT:    ldrh r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrh r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    ldrh r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmovlb.u16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs.sext = sext <4 x i8> %offs to <4 x i32>
+  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs.sext
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %gather.zext = zext <4 x i16> %gather to <4 x i32>
+  ret <4 x i32> %gather.zext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sext_signed_scaled_i16_i8(i16* %base, <4 x i8>* %offptr) {
+; CHECK-LABEL: sext_signed_scaled_i16_i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.s32 q0, [r1]
+; CHECK-NEXT:    vshl.i32 q0, q0, #1
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    ldrh r2, [r2]
+; CHECK-NEXT:    ldrh r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrh r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    ldrh r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs.sext = sext <4 x i8> %offs to <4 x i32>
+  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs.sext
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %gather.sext = sext <4 x i16> %gather to <4 x i32>
+  ret <4 x i32> %gather.sext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @zext_unsigned_scaled_i16_i8(i16* %base, <4 x i8>* %offptr) {
+; CHECK-LABEL: zext_unsigned_scaled_i16_i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.u32 q0, [r1]
+; CHECK-NEXT:    vshl.i32 q0, q0, #1
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    ldrh r2, [r2]
+; CHECK-NEXT:    ldrh r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrh r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    ldrh r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmovlb.u16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs.zext = zext <4 x i8> %offs to <4 x i32>
+  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs.zext
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %gather.zext = zext <4 x i16> %gather to <4 x i32>
+  ret <4 x i32> %gather.zext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sext_unsigned_scaled_i16_i8(i16* %base, <4 x i8>* %offptr) {
+; CHECK-LABEL: sext_unsigned_scaled_i16_i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.u32 q0, [r1]
+; CHECK-NEXT:    vshl.i32 q0, q0, #1
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    ldrh r2, [r2]
+; CHECK-NEXT:    ldrh r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrh r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    ldrh r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs.zext = zext <4 x i8> %offs to <4 x i32>
+  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs.zext
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %gather.sext = sext <4 x i16> %gather to <4 x i32>
+  ret <4 x i32> %gather.sext
+}
+
+declare <4 x i8>  @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*>, i32, <4 x i1>, <4 x i8>)
+declare <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*>, i32, <4 x i1>, <4 x i16>)
+declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
+declare <4 x half> @llvm.masked.gather.v4f16.v4p0f16(<4 x half*>, i32, <4 x i1>, <4 x half>)
+declare <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*>, i32, <4 x i1>, <4 x float>)

diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-ind32-unscaled.ll b/llvm/test/CodeGen/Thumb2/mve-gather-ind32-unscaled.ll
new file mode 100644
index 000000000000..fcb7dd8f87d6
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-ind32-unscaled.ll
@@ -0,0 +1,742 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve.fp -enable-arm-maskedgatscat %s -o - 2>/dev/null | FileCheck %s
+
+define arm_aapcs_vfpcc <4 x i32> @zext_unscaled_i8_i32(i8* %base, <4 x i32>* %offptr) {
+; CHECK-LABEL: zext_unscaled_i8_i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    vmov.i32 q1, #0xff
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r1, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r0, s3
+; CHECK-NEXT:    ldrb r2, [r2]
+; CHECK-NEXT:    ldrb r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrb r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r1
+; CHECK-NEXT:    ldrb r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r0
+; CHECK-NEXT:    vand q0, q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
+  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs
+  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %gather.zext = zext <4 x i8> %gather to <4 x i32>
+  ret <4 x i32> %gather.zext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sext_unscaled_i8_i32(i8* %base, <4 x i32>* %offptr) {
+; CHECK-LABEL: sext_unscaled_i8_i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    ldrb r0, [r0]
+; CHECK-NEXT:    ldrb r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    ldrb r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[1], r3
+; CHECK-NEXT:    ldrb r2, [r2]
+; CHECK-NEXT:    vmov.32 q0[2], r1
+; CHECK-NEXT:    vmov.32 q0[3], r2
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
+  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs
+  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %gather.sext = sext <4 x i8> %gather to <4 x i32>
+  ret <4 x i32> %gather.sext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @zext_unscaled_i16_i32(i8* %base, <4 x i32>* %offptr) {
+; CHECK-LABEL: zext_unscaled_i16_i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    ldrh r2, [r2]
+; CHECK-NEXT:    ldrh r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrh r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    ldrh r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmovlb.u16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
+  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs
+  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %gather.zext = zext <4 x i16> %gather to <4 x i32>
+  ret <4 x i32> %gather.zext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sext_unscaled_i16_i32(i8* %base, <4 x i32>* %offptr) {
+; CHECK-LABEL: sext_unscaled_i16_i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    ldrh r2, [r2]
+; CHECK-NEXT:    ldrh r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrh r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    ldrh r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
+  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs
+  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %gather.sext = sext <4 x i16> %gather to <4 x i32>
+  ret <4 x i32> %gather.sext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @unscaled_i32_i32(i8* %base, <4 x i32>* %offptr) {
+; CHECK-LABEL: unscaled_i32_i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    vadd.i32 q1, q0, r0
+; CHECK-NEXT:    vldrw.u32 q0, [q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
+  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs
+  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i32*>
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  ret <4 x i32> %gather
+}
+
+define arm_aapcs_vfpcc <4 x float> @unscaled_f32_i32(i8* %base, <4 x i32>* %offptr) {
+; CHECK-LABEL: unscaled_f32_i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    vadd.i32 q1, q0, r0
+; CHECK-NEXT:    vldrw.u32 q0, [q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
+  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs
+  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x float*>
+  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
+  ret <4 x float> %gather
+}
+
+define arm_aapcs_vfpcc <4 x i32> @unsigned_unscaled_b_i32_i16(i8* %base, <4 x i16>* %offptr) {
+; CHECK-LABEL: unsigned_unscaled_b_i32_i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrh.u32 q0, [r1]
+; CHECK-NEXT:    vadd.i32 q1, q0, r0
+; CHECK-NEXT:    vldrw.u32 q0, [q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs.zext = zext <4 x i16> %offs to <4 x i32>
+  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i32*>
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  ret <4 x i32> %gather
+}
+
+define arm_aapcs_vfpcc <4 x i32> @signed_unscaled_i32_i16(i8* %base, <4 x i16>* %offptr) {
+; CHECK-LABEL: signed_unscaled_i32_i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrh.s32 q0, [r1]
+; CHECK-NEXT:    vadd.i32 q1, q0, r0
+; CHECK-NEXT:    vldrw.u32 q0, [q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs.sext = sext <4 x i16> %offs to <4 x i32>
+  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i32*>
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  ret <4 x i32> %gather
+}
+
+define arm_aapcs_vfpcc <4 x float> @a_unsigned_unscaled_f32_i16(i8* %base, <4 x i16>* %offptr) {
+; CHECK-LABEL: a_unsigned_unscaled_f32_i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrh.u32 q0, [r1]
+; CHECK-NEXT:    vadd.i32 q1, q0, r0
+; CHECK-NEXT:    vldrw.u32 q0, [q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs.zext = zext <4 x i16> %offs to <4 x i32>
+  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x float*>
+  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
+  ret <4 x float> %gather
+}
+
+define arm_aapcs_vfpcc <4 x float> @b_signed_unscaled_f32_i16(i8* %base, <4 x i16>* %offptr) {
+; CHECK-LABEL: b_signed_unscaled_f32_i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrh.s32 q0, [r1]
+; CHECK-NEXT:    vadd.i32 q1, q0, r0
+; CHECK-NEXT:    vldrw.u32 q0, [q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs.sext = sext <4 x i16> %offs to <4 x i32>
+  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x float*>
+  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
+  ret <4 x float> %gather
+}
+
+define arm_aapcs_vfpcc <4 x i32> @zext_signed_unscaled_i16_i16(i8* %base, <4 x i16>* %offptr) {
+; CHECK-LABEL: zext_signed_unscaled_i16_i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrh.s32 q0, [r1]
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    ldrh r2, [r2]
+; CHECK-NEXT:    ldrh r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrh r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    ldrh r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmovlb.u16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs.sext = sext <4 x i16> %offs to <4 x i32>
+  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %gather.zext = zext <4 x i16> %gather to <4 x i32>
+  ret <4 x i32> %gather.zext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sext_signed_unscaled_i16_i16(i8* %base, <4 x i16>* %offptr) {
+; CHECK-LABEL: sext_signed_unscaled_i16_i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrh.s32 q0, [r1]
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    ldrh r2, [r2]
+; CHECK-NEXT:    ldrh r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrh r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    ldrh r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs.sext = sext <4 x i16> %offs to <4 x i32>
+  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %gather.sext = sext <4 x i16> %gather to <4 x i32>
+  ret <4 x i32> %gather.sext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @zext_unsigned_unscaled_i16_i16(i8* %base, <4 x i16>* %offptr) {
+; CHECK-LABEL: zext_unsigned_unscaled_i16_i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrh.u32 q0, [r1]
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    ldrh r2, [r2]
+; CHECK-NEXT:    ldrh r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrh r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    ldrh r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmovlb.u16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs.zext = zext <4 x i16> %offs to <4 x i32>
+  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %gather.zext = zext <4 x i16> %gather to <4 x i32>
+  ret <4 x i32> %gather.zext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sext_unsigned_unscaled_i16_i16(i8* %base, <4 x i16>* %offptr) {
+; CHECK-LABEL: sext_unsigned_unscaled_i16_i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrh.u32 q0, [r1]
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    ldrh r2, [r2]
+; CHECK-NEXT:    ldrh r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrh r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    ldrh r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs.zext = zext <4 x i16> %offs to <4 x i32>
+  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %gather.sext = sext <4 x i16> %gather to <4 x i32>
+  ret <4 x i32> %gather.sext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @zext_signed_unscaled_i8_i16(i8* %base, <4 x i16>* %offptr) {
+; CHECK-LABEL: zext_signed_unscaled_i8_i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrh.s32 q0, [r1]
+; CHECK-NEXT:    vmov.i32 q1, #0xff
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r1, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r0, s3
+; CHECK-NEXT:    ldrb r2, [r2]
+; CHECK-NEXT:    ldrb r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrb r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r1
+; CHECK-NEXT:    ldrb r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r0
+; CHECK-NEXT:    vand q0, q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs.sext = sext <4 x i16> %offs to <4 x i32>
+  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
+  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %gather.zext = zext <4 x i8> %gather to <4 x i32>
+  ret <4 x i32> %gather.zext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sext_signed_unscaled_i8_i16(i8* %base, <4 x i16>* %offptr) {
+; CHECK-LABEL: sext_signed_unscaled_i8_i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrh.s32 q0, [r1]
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    ldrb r0, [r0]
+; CHECK-NEXT:    ldrb r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    ldrb r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[1], r3
+; CHECK-NEXT:    ldrb r2, [r2]
+; CHECK-NEXT:    vmov.32 q0[2], r1
+; CHECK-NEXT:    vmov.32 q0[3], r2
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs.sext = sext <4 x i16> %offs to <4 x i32>
+  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
+  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %gather.sext = sext <4 x i8> %gather to <4 x i32>
+  ret <4 x i32> %gather.sext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @zext_unsigned_unscaled_i8_i16(i8* %base, <4 x i16>* %offptr) {
+; CHECK-LABEL: zext_unsigned_unscaled_i8_i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrh.u32 q0, [r1]
+; CHECK-NEXT:    vmov.i32 q1, #0xff
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r1, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r0, s3
+; CHECK-NEXT:    ldrb r2, [r2]
+; CHECK-NEXT:    ldrb r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrb r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r1
+; CHECK-NEXT:    ldrb r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r0
+; CHECK-NEXT:    vand q0, q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs.zext = zext <4 x i16> %offs to <4 x i32>
+  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
+  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %gather.zext = zext <4 x i8> %gather to <4 x i32>
+  ret <4 x i32> %gather.zext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sext_unsigned_unscaled_i8_i16(i8* %base, <4 x i16>* %offptr) {
+; CHECK-LABEL: sext_unsigned_unscaled_i8_i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrh.u32 q0, [r1]
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    ldrb r0, [r0]
+; CHECK-NEXT:    ldrb r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    ldrb r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[1], r3
+; CHECK-NEXT:    ldrb r2, [r2]
+; CHECK-NEXT:    vmov.32 q0[2], r1
+; CHECK-NEXT:    vmov.32 q0[3], r2
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs.zext = zext <4 x i16> %offs to <4 x i32>
+  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
+  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %gather.sext = sext <4 x i8> %gather to <4 x i32>
+  ret <4 x i32> %gather.sext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @unsigned_unscaled_b_i32_i8(i8* %base, <4 x i8>* %offptr) {
+; CHECK-LABEL: unsigned_unscaled_b_i32_i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.u32 q0, [r1]
+; CHECK-NEXT:    vadd.i32 q1, q0, r0
+; CHECK-NEXT:    vldrw.u32 q0, [q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs.zext = zext <4 x i8> %offs to <4 x i32>
+  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i32*>
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  ret <4 x i32> %gather
+}
+
+define arm_aapcs_vfpcc <4 x i32> @signed_unscaled_i32_i8(i8* %base, <4 x i8>* %offptr) {
+; CHECK-LABEL: signed_unscaled_i32_i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.s32 q0, [r1]
+; CHECK-NEXT:    vadd.i32 q1, q0, r0
+; CHECK-NEXT:    vldrw.u32 q0, [q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs.sext = sext <4 x i8> %offs to <4 x i32>
+  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i32*>
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  ret <4 x i32> %gather
+}
+
+define arm_aapcs_vfpcc <4 x float> @a_unsigned_unscaled_f32_i8(i8* %base, <4 x i8>* %offptr) {
+; CHECK-LABEL: a_unsigned_unscaled_f32_i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.u32 q0, [r1]
+; CHECK-NEXT:    vadd.i32 q1, q0, r0
+; CHECK-NEXT:    vldrw.u32 q0, [q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs.zext = zext <4 x i8> %offs to <4 x i32>
+  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x float*>
+  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
+  ret <4 x float> %gather
+}
+
+define arm_aapcs_vfpcc <4 x float> @b_signed_unscaled_f32_i8(i8* %base, <4 x i8>* %offptr) {
+; CHECK-LABEL: b_signed_unscaled_f32_i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.s32 q0, [r1]
+; CHECK-NEXT:    vadd.i32 q1, q0, r0
+; CHECK-NEXT:    vldrw.u32 q0, [q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs.sext = sext <4 x i8> %offs to <4 x i32>
+  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x float*>
+  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
+  ret <4 x float> %gather
+}
+
+define arm_aapcs_vfpcc <4 x i32> @zext_signed_unscaled_i16_i8(i8* %base, <4 x i8>* %offptr) {
+; CHECK-LABEL: zext_signed_unscaled_i16_i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.s32 q0, [r1]
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    ldrh r2, [r2]
+; CHECK-NEXT:    ldrh r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrh r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    ldrh r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmovlb.u16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs.sext = sext <4 x i8> %offs to <4 x i32>
+  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %gather.zext = zext <4 x i16> %gather to <4 x i32>
+  ret <4 x i32> %gather.zext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sext_signed_unscaled_i16_i8(i8* %base, <4 x i8>* %offptr) {
+; CHECK-LABEL: sext_signed_unscaled_i16_i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.s32 q0, [r1]
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    ldrh r2, [r2]
+; CHECK-NEXT:    ldrh r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrh r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    ldrh r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs.sext = sext <4 x i8> %offs to <4 x i32>
+  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %gather.sext = sext <4 x i16> %gather to <4 x i32>
+  ret <4 x i32> %gather.sext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @zext_unsigned_unscaled_i16_i8(i8* %base, <4 x i8>* %offptr) {
+; CHECK-LABEL: zext_unsigned_unscaled_i16_i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.u32 q0, [r1]
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    ldrh r2, [r2]
+; CHECK-NEXT:    ldrh r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrh r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    ldrh r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmovlb.u16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs.zext = zext <4 x i8> %offs to <4 x i32>
+  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %gather.zext = zext <4 x i16> %gather to <4 x i32>
+  ret <4 x i32> %gather.zext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sext_unsigned_unscaled_i16_i8(i8* %base, <4 x i8>* %offptr) {
+; CHECK-LABEL: sext_unsigned_unscaled_i16_i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.u32 q0, [r1]
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    ldrh r2, [r2]
+; CHECK-NEXT:    ldrh r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrh r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    ldrh r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs.zext = zext <4 x i8> %offs to <4 x i32>
+  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %gather.sext = sext <4 x i16> %gather to <4 x i32>
+  ret <4 x i32> %gather.sext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @zext_signed_unscaled_i8_i8(i8* %base, <4 x i8>* %offptr) {
+; CHECK-LABEL: zext_signed_unscaled_i8_i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.s32 q0, [r1]
+; CHECK-NEXT:    vmov.i32 q1, #0xff
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r1, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r0, s3
+; CHECK-NEXT:    ldrb r2, [r2]
+; CHECK-NEXT:    ldrb r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrb r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r1
+; CHECK-NEXT:    ldrb r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r0
+; CHECK-NEXT:    vand q0, q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs.sext = sext <4 x i8> %offs to <4 x i32>
+  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
+  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %gather.zext = zext <4 x i8> %gather to <4 x i32>
+  ret <4 x i32> %gather.zext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sext_signed_unscaled_i8_i8(i8* %base, <4 x i8>* %offptr) {
+; CHECK-LABEL: sext_signed_unscaled_i8_i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.s32 q0, [r1]
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    ldrb r0, [r0]
+; CHECK-NEXT:    ldrb r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    ldrb r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[1], r3
+; CHECK-NEXT:    ldrb r2, [r2]
+; CHECK-NEXT:    vmov.32 q0[2], r1
+; CHECK-NEXT:    vmov.32 q0[3], r2
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs.sext = sext <4 x i8> %offs to <4 x i32>
+  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
+  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %gather.sext = sext <4 x i8> %gather to <4 x i32>
+  ret <4 x i32> %gather.sext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @zext_unsigned_unscaled_i8_i8(i8* %base, <4 x i8>* %offptr) {
+; CHECK-LABEL: zext_unsigned_unscaled_i8_i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.u32 q0, [r1]
+; CHECK-NEXT:    vmov.i32 q1, #0xff
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r1, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r0, s3
+; CHECK-NEXT:    ldrb r2, [r2]
+; CHECK-NEXT:    ldrb r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrb r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r1
+; CHECK-NEXT:    ldrb r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r0
+; CHECK-NEXT:    vand q0, q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs.zext = zext <4 x i8> %offs to <4 x i32>
+  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
+  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %gather.zext = zext <4 x i8> %gather to <4 x i32>
+  ret <4 x i32> %gather.zext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @sext_unsigned_unscaled_i8_i8(i8* %base, <4 x i8>* %offptr) {
+; CHECK-LABEL: sext_unsigned_unscaled_i8_i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.u32 q0, [r1]
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    ldrb r0, [r0]
+; CHECK-NEXT:    ldrb r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    ldrb r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[1], r3
+; CHECK-NEXT:    ldrb r2, [r2]
+; CHECK-NEXT:    vmov.32 q0[2], r1
+; CHECK-NEXT:    vmov.32 q0[3], r2
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs.zext = zext <4 x i8> %offs to <4 x i32>
+  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
+  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %gather.sext = sext <4 x i8> %gather to <4 x i32>
+  ret <4 x i32> %gather.sext
+}
+
+declare <4 x i8>  @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*>, i32, <4 x i1>, <4 x i8>)
+declare <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*>, i32, <4 x i1>, <4 x i16>)
+declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
+declare <4 x half> @llvm.masked.gather.v4f16.v4p0f16(<4 x half*>, i32, <4 x i1>, <4 x half>)
+declare <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*>, i32, <4 x i1>, <4 x float>)
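
To make the intent of the checks in this file explicit: the new pass rewrites a plain masked gather from a vector of pointers into an MVE gather intrinsic before instruction selection. A rough sketch of the rewrite for the v4f32 unscaled case follows; the intrinsic name and overload suffixes are taken from my reading of IntrinsicsARM.td at this revision, so treat them as an illustration rather than as lines from this patch:

    ; Input: a gather from a vector of pointers built with a vector GEP.
    %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
    %ptrf = bitcast <4 x i8*> %ptrs to <4 x float*>
    %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %ptrf, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)

    ; After MVEGatherScatterLowering (sketch): only the gather itself is
    ; replaced; the vector GEP stays behind and is selected as the vadd.i32,
    ; while the intrinsic becomes the vldrw.u32 q0, [q1] in the CHECK lines.
    %gather = call <4 x float> @llvm.arm.mve.vldr.gather.base.v4f32.v4p0f32(<4 x float*> %ptrf, i32 0)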

diff  --git a/llvm/test/CodeGen/Thumb2/mve-gather-ptrs.ll b/llvm/test/CodeGen/Thumb2/mve-gather-ptrs.ll
new file mode 100644
index 000000000000..b595a20bf497
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-ptrs.ll
@@ -0,0 +1,850 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve.fp -enable-arm-maskedldst -enable-arm-maskedgatscat %s -o - | FileCheck %s
+
+; i32
+
+define arm_aapcs_vfpcc <2 x i32> @ptr_v2i32(<2 x i32*>* %offptr) {
+; CHECK-LABEL: ptr_v2i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    ldrd r1, r0, [r0]
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    ldr r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[0], r1
+; CHECK-NEXT:    vmov.32 q0[2], r0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <2 x i32*>, <2 x i32*>* %offptr, align 4
+  %gather = call <2 x i32> @llvm.masked.gather.v2i32.v2p0i32(<2 x i32*> %offs, i32 4, <2 x i1> <i1 true, i1 true>, <2 x i32> undef)
+  ret <2 x i32> %gather
+}
+
+define arm_aapcs_vfpcc <4 x i32> @ptr_v4i32(<4 x i32*>* %offptr) {
+; CHECK-LABEL: ptr_v4i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q1, [r0]
+; CHECK-NEXT:    vldrw.u32 q0, [q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i32*>, <4 x i32*>* %offptr, align 4
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %offs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  ret <4 x i32> %gather
+}
+
+define arm_aapcs_vfpcc <8 x i32> @ptr_v8i32(<8 x i32*>* %offptr) {
+; CHECK-LABEL: ptr_v8i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov r5, s1
+; CHECK-NEXT:    vmov r4, s7
+; CHECK-NEXT:    ldr.w r12, [r1]
+; CHECK-NEXT:    vmov r1, s5
+; CHECK-NEXT:    ldr.w lr, [r2]
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    ldr r3, [r3]
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    ldr r5, [r5]
+; CHECK-NEXT:    vmov.32 q0[0], r3
+; CHECK-NEXT:    vmov.32 q1[0], r0
+; CHECK-NEXT:    ldr r4, [r4]
+; CHECK-NEXT:    vmov.32 q0[1], r5
+; CHECK-NEXT:    vmov.32 q0[2], r12
+; CHECK-NEXT:    vmov.32 q0[3], lr
+; CHECK-NEXT:    ldr r1, [r1]
+; CHECK-NEXT:    ldr r2, [r2]
+; CHECK-NEXT:    vmov.32 q1[1], r1
+; CHECK-NEXT:    vmov.32 q1[2], r2
+; CHECK-NEXT:    vmov.32 q1[3], r4
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+entry:
+  %offs = load <8 x i32*>, <8 x i32*>* %offptr, align 4
+  %gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0i32(<8 x i32*> %offs, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+  ret <8 x i32> %gather
+}
+
+define arm_aapcs_vfpcc <16 x i32> @ptr_v16i32(<16 x i32*>* %offptr) {
+; CHECK-LABEL: ptr_v16i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r6, lr}
+; CHECK-NEXT:    push {r4, r5, r6, lr}
+; CHECK-NEXT:    vldrw.u32 q2, [r0, #32]
+; CHECK-NEXT:    vldrw.u32 q0, [r0, #48]
+; CHECK-NEXT:    vldrw.u32 q1, [r0]
+; CHECK-NEXT:    vldrw.u32 q3, [r0, #16]
+; CHECK-NEXT:    vmov r1, s10
+; CHECK-NEXT:    vmov r5, s4
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    vmov r6, s7
+; CHECK-NEXT:    vmov r4, s11
+; CHECK-NEXT:    ldr.w r12, [r1]
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    ldr r5, [r5]
+; CHECK-NEXT:    ldr r2, [r2]
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    ldr r6, [r6]
+; CHECK-NEXT:    ldr r4, [r4]
+; CHECK-NEXT:    ldr.w lr, [r1]
+; CHECK-NEXT:    vmov r1, s1
+; CHECK-NEXT:    ldr r3, [r1]
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    vmov.32 q0[0], r5
+; CHECK-NEXT:    vmov r5, s5
+; CHECK-NEXT:    ldr r1, [r1]
+; CHECK-NEXT:    ldr r5, [r5]
+; CHECK-NEXT:    vmov.32 q0[1], r5
+; CHECK-NEXT:    vmov r5, s6
+; CHECK-NEXT:    vmov.32 q1[0], r0
+; CHECK-NEXT:    vmov r0, s13
+; CHECK-NEXT:    ldr r5, [r5]
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[2], r5
+; CHECK-NEXT:    vmov r5, s8
+; CHECK-NEXT:    vmov.32 q1[1], r0
+; CHECK-NEXT:    vmov r0, s14
+; CHECK-NEXT:    vmov.32 q0[3], r6
+; CHECK-NEXT:    ldr r5, [r5]
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    vmov.32 q1[2], r0
+; CHECK-NEXT:    vmov r0, s15
+; CHECK-NEXT:    vmov.32 q3[0], lr
+; CHECK-NEXT:    vmov.32 q3[1], r3
+; CHECK-NEXT:    vmov.32 q3[2], r1
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    vmov.32 q1[3], r0
+; CHECK-NEXT:    vmov r0, s9
+; CHECK-NEXT:    vmov.32 q2[0], r5
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov.32 q2[2], r12
+; CHECK-NEXT:    vmov.32 q2[3], r4
+; CHECK-NEXT:    pop {r4, r5, r6, pc}
+entry:
+  %offs = load <16 x i32*>, <16 x i32*>* %offptr, align 4
+  %gather = call <16 x i32> @llvm.masked.gather.v16i32.v16p0i32(<16 x i32*> %offs, i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i32> undef)
+  ret <16 x i32> %gather
+}
+
+; f32
+
+define arm_aapcs_vfpcc <2 x float> @ptr_v2f32(<2 x float*>* %offptr) {
+; CHECK-LABEL: ptr_v2f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    ldrd r1, r0, [r0]
+; CHECK-NEXT:    vldr s1, [r0]
+; CHECK-NEXT:    vldr s0, [r1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <2 x float*>, <2 x float*>* %offptr, align 4
+  %gather = call <2 x float> @llvm.masked.gather.v2f32.v2p0f32(<2 x float*> %offs, i32 4, <2 x i1> <i1 true, i1 true>, <2 x float> undef)
+  ret <2 x float> %gather
+}
+
+define arm_aapcs_vfpcc <4 x float> @ptr_v4f32(<4 x float*>* %offptr) {
+; CHECK-LABEL: ptr_v4f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q1, [r0]
+; CHECK-NEXT:    vldrw.u32 q0, [q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x float*>, <4 x float*>* %offptr, align 4
+  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %offs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
+  ret <4 x float> %gather
+}
+
+define arm_aapcs_vfpcc <8 x float> @ptr_v8f32(<8 x float*>* %offptr) {
+; CHECK-LABEL: ptr_v8f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q1, [r0]
+; CHECK-NEXT:    vldrw.u32 q2, [r0, #16]
+; CHECK-NEXT:    vmov r1, s7
+; CHECK-NEXT:    vmov r0, s11
+; CHECK-NEXT:    vldr s3, [r1]
+; CHECK-NEXT:    vmov r1, s6
+; CHECK-NEXT:    vldr s2, [r1]
+; CHECK-NEXT:    vmov r1, s5
+; CHECK-NEXT:    vldr s1, [r1]
+; CHECK-NEXT:    vmov r1, s4
+; CHECK-NEXT:    vldr s7, [r0]
+; CHECK-NEXT:    vmov r0, s10
+; CHECK-NEXT:    vldr s0, [r1]
+; CHECK-NEXT:    vldr s6, [r0]
+; CHECK-NEXT:    vmov r0, s9
+; CHECK-NEXT:    vldr s5, [r0]
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vldr s4, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <8 x float*>, <8 x float*>* %offptr, align 4
+  %gather = call <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*> %offs, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x float> undef)
+  ret <8 x float> %gather
+}
+
+; i16
+
+define arm_aapcs_vfpcc <8 x i16> @ptr_i16(<8 x i16*>* %offptr) {
+; CHECK-LABEL: ptr_i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r5, s1
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov r4, s7
+; CHECK-NEXT:    ldrh.w r12, [r1]
+; CHECK-NEXT:    vmov r1, s5
+; CHECK-NEXT:    ldrh.w lr, [r2]
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    ldrh r3, [r3]
+; CHECK-NEXT:    ldrh r5, [r5]
+; CHECK-NEXT:    vmov.16 q0[0], r3
+; CHECK-NEXT:    ldrh r0, [r0]
+; CHECK-NEXT:    vmov.16 q0[1], r5
+; CHECK-NEXT:    ldrh r4, [r4]
+; CHECK-NEXT:    vmov.16 q0[2], r12
+; CHECK-NEXT:    vmov.16 q0[3], lr
+; CHECK-NEXT:    vmov.16 q0[4], r0
+; CHECK-NEXT:    ldrh r1, [r1]
+; CHECK-NEXT:    ldrh r2, [r2]
+; CHECK-NEXT:    vmov.16 q0[5], r1
+; CHECK-NEXT:    vmov.16 q0[6], r2
+; CHECK-NEXT:    vmov.16 q0[7], r4
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+entry:
+  %offs = load <8 x i16*>, <8 x i16*>* %offptr, align 4
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %offs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  ret <8 x i16> %gather
+}
+
+define arm_aapcs_vfpcc <2 x i32> @ptr_v2i16_sext(<2 x i16*>* %offptr) {
+; CHECK-LABEL: ptr_v2i16_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    ldrd r1, r0, [r0]
+; CHECK-NEXT:    ldrsh.w r0, [r0]
+; CHECK-NEXT:    ldrsh.w r1, [r1]
+; CHECK-NEXT:    asrs r2, r0, #31
+; CHECK-NEXT:    vmov.32 q0[0], r1
+; CHECK-NEXT:    asrs r1, r1, #31
+; CHECK-NEXT:    vmov.32 q0[1], r1
+; CHECK-NEXT:    vmov.32 q0[2], r0
+; CHECK-NEXT:    vmov.32 q0[3], r2
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <2 x i16*>, <2 x i16*>* %offptr, align 4
+  %gather = call <2 x i16> @llvm.masked.gather.v2i16.v2p0i16(<2 x i16*> %offs, i32 2, <2 x i1> <i1 true, i1 true>, <2 x i16> undef)
+  %ext = sext <2 x i16> %gather to <2 x i32>
+  ret <2 x i32> %ext
+}
+
+define arm_aapcs_vfpcc <2 x i32> @ptr_v2i16_zext(<2 x i16*>* %offptr) {
+; CHECK-LABEL: ptr_v2i16_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    ldrd r1, r0, [r0]
+; CHECK-NEXT:    adr r2, .LCPI9_0
+; CHECK-NEXT:    ldrh r0, [r0]
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    ldrh r1, [r1]
+; CHECK-NEXT:    vmov.32 q1[0], r1
+; CHECK-NEXT:    vmov.32 q1[2], r0
+; CHECK-NEXT:    vand q0, q1, q0
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI9_0:
+; CHECK-NEXT:    .long 65535 @ 0xffff
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 65535 @ 0xffff
+; CHECK-NEXT:    .long 0 @ 0x0
+entry:
+  %offs = load <2 x i16*>, <2 x i16*>* %offptr, align 4
+  %gather = call <2 x i16> @llvm.masked.gather.v2i16.v2p0i16(<2 x i16*> %offs, i32 2, <2 x i1> <i1 true, i1 true>, <2 x i16> undef)
+  %ext = zext <2 x i16> %gather to <2 x i32>
+  ret <2 x i32> %ext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @ptr_v4i16_sext(<4 x i16*>* %offptr) {
+; CHECK-LABEL: ptr_v4i16_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    ldrh r2, [r2]
+; CHECK-NEXT:    ldrh r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrh r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    ldrh r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i16*>, <4 x i16*>* %offptr, align 4
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %offs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %ext = sext <4 x i16> %gather to <4 x i32>
+  ret <4 x i32> %ext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @ptr_v4i16_zext(<4 x i16*>* %offptr) {
+; CHECK-LABEL: ptr_v4i16_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    ldrh r2, [r2]
+; CHECK-NEXT:    ldrh r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrh r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    ldrh r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmovlb.u16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i16*>, <4 x i16*>* %offptr, align 4
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %offs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %ext = zext <4 x i16> %gather to <4 x i32>
+  ret <4 x i32> %ext
+}
+
+define arm_aapcs_vfpcc <8 x i32> @ptr_v8i16_sext(<8 x i16*>* %offptr) {
+; CHECK-LABEL: ptr_v8i16_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov r5, s1
+; CHECK-NEXT:    vmov r4, s7
+; CHECK-NEXT:    ldrh.w r12, [r1]
+; CHECK-NEXT:    vmov r1, s5
+; CHECK-NEXT:    ldrh.w lr, [r2]
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    ldrh r3, [r3]
+; CHECK-NEXT:    ldrh r0, [r0]
+; CHECK-NEXT:    ldrh r5, [r5]
+; CHECK-NEXT:    vmov.32 q0[0], r3
+; CHECK-NEXT:    vmov.32 q1[0], r0
+; CHECK-NEXT:    ldrh r4, [r4]
+; CHECK-NEXT:    vmov.32 q0[1], r5
+; CHECK-NEXT:    vmov.32 q0[2], r12
+; CHECK-NEXT:    vmov.32 q0[3], lr
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    ldrh r1, [r1]
+; CHECK-NEXT:    ldrh r2, [r2]
+; CHECK-NEXT:    vmov.32 q1[1], r1
+; CHECK-NEXT:    vmov.32 q1[2], r2
+; CHECK-NEXT:    vmov.32 q1[3], r4
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+entry:
+  %offs = load <8 x i16*>, <8 x i16*>* %offptr, align 4
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %offs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %ext = sext <8 x i16> %gather to <8 x i32>
+  ret <8 x i32> %ext
+}
+
+define arm_aapcs_vfpcc <8 x i32> @ptr_v8i16_zext(<8 x i16*>* %offptr) {
+; CHECK-LABEL: ptr_v8i16_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov r5, s1
+; CHECK-NEXT:    vmov r4, s7
+; CHECK-NEXT:    ldrh.w r12, [r1]
+; CHECK-NEXT:    vmov r1, s5
+; CHECK-NEXT:    ldrh.w lr, [r2]
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    ldrh r3, [r3]
+; CHECK-NEXT:    ldrh r0, [r0]
+; CHECK-NEXT:    ldrh r5, [r5]
+; CHECK-NEXT:    vmov.32 q0[0], r3
+; CHECK-NEXT:    vmov.32 q1[0], r0
+; CHECK-NEXT:    ldrh r4, [r4]
+; CHECK-NEXT:    vmov.32 q0[1], r5
+; CHECK-NEXT:    vmov.32 q0[2], r12
+; CHECK-NEXT:    vmov.32 q0[3], lr
+; CHECK-NEXT:    vmovlb.u16 q0, q0
+; CHECK-NEXT:    ldrh r1, [r1]
+; CHECK-NEXT:    ldrh r2, [r2]
+; CHECK-NEXT:    vmov.32 q1[1], r1
+; CHECK-NEXT:    vmov.32 q1[2], r2
+; CHECK-NEXT:    vmov.32 q1[3], r4
+; CHECK-NEXT:    vmovlb.u16 q1, q1
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+entry:
+  %offs = load <8 x i16*>, <8 x i16*>* %offptr, align 4
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %offs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %ext = zext <8 x i16> %gather to <8 x i32>
+  ret <8 x i32> %ext
+}
+
+; f16
+
+define arm_aapcs_vfpcc <8 x half> @ptr_f16(<8 x half*>* %offptr) {
+; CHECK-LABEL: ptr_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q1, [r0]
+; CHECK-NEXT:    vmov r1, s5
+; CHECK-NEXT:    vldr.16 s0, [r1]
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vldr.16 s0, [r2]
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov.16 q0[0], r2
+; CHECK-NEXT:    vmov.16 q0[1], r1
+; CHECK-NEXT:    vmov r1, s6
+; CHECK-NEXT:    vldr.16 s8, [r1]
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    vmov.16 q0[2], r1
+; CHECK-NEXT:    vmov r1, s7
+; CHECK-NEXT:    vldr.16 s4, [r1]
+; CHECK-NEXT:    vmov r1, s4
+; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-NEXT:    vmov.16 q0[3], r1
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vldr.16 s8, [r0]
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmov.16 q0[4], r0
+; CHECK-NEXT:    vmov r0, s5
+; CHECK-NEXT:    vldr.16 s8, [r0]
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmov.16 q0[5], r0
+; CHECK-NEXT:    vmov r0, s6
+; CHECK-NEXT:    vldr.16 s8, [r0]
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmov.16 q0[6], r0
+; CHECK-NEXT:    vmov r0, s7
+; CHECK-NEXT:    vldr.16 s4, [r0]
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov.16 q0[7], r0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <8 x half*>, <8 x half*>* %offptr, align 4
+  %gather = call <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*> %offs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x half> undef)
+  ret <8 x half> %gather
+}
+
+; i8
+
+define arm_aapcs_vfpcc <16 x i8> @ptr_i8(<16 x i8*>* %offptr) {
+; CHECK-LABEL: ptr_i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r6, lr}
+; CHECK-NEXT:    push {r4, r5, r6, lr}
+; CHECK-NEXT:    vldrw.u32 q1, [r0, #32]
+; CHECK-NEXT:    vldrw.u32 q0, [r0, #48]
+; CHECK-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-NEXT:    vmov r1, s6
+; CHECK-NEXT:    vmov r5, s8
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    vmov r6, s11
+; CHECK-NEXT:    vmov r4, s7
+; CHECK-NEXT:    ldrb.w r12, [r1]
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    ldrb r5, [r5]
+; CHECK-NEXT:    ldrb r2, [r2]
+; CHECK-NEXT:    ldrb r6, [r6]
+; CHECK-NEXT:    ldrb r4, [r4]
+; CHECK-NEXT:    ldrb.w lr, [r1]
+; CHECK-NEXT:    vmov r1, s1
+; CHECK-NEXT:    ldrb r3, [r1]
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    vmov.8 q0[0], r5
+; CHECK-NEXT:    vmov r5, s9
+; CHECK-NEXT:    ldrb r1, [r1]
+; CHECK-NEXT:    ldrb r5, [r5]
+; CHECK-NEXT:    vmov.8 q0[1], r5
+; CHECK-NEXT:    vmov r5, s10
+; CHECK-NEXT:    vldrw.u32 q2, [r0, #16]
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    ldrb r5, [r5]
+; CHECK-NEXT:    vmov.8 q0[2], r5
+; CHECK-NEXT:    vmov r5, s4
+; CHECK-NEXT:    ldrb r0, [r0]
+; CHECK-NEXT:    vmov.8 q0[3], r6
+; CHECK-NEXT:    vmov.8 q0[4], r0
+; CHECK-NEXT:    vmov r0, s9
+; CHECK-NEXT:    ldrb r5, [r5]
+; CHECK-NEXT:    ldrb r0, [r0]
+; CHECK-NEXT:    vmov.8 q0[5], r0
+; CHECK-NEXT:    vmov r0, s10
+; CHECK-NEXT:    ldrb r0, [r0]
+; CHECK-NEXT:    vmov.8 q0[6], r0
+; CHECK-NEXT:    vmov r0, s11
+; CHECK-NEXT:    ldrb r0, [r0]
+; CHECK-NEXT:    vmov.8 q0[7], r0
+; CHECK-NEXT:    vmov r0, s5
+; CHECK-NEXT:    vmov.8 q0[8], r5
+; CHECK-NEXT:    ldrb r0, [r0]
+; CHECK-NEXT:    vmov.8 q0[9], r0
+; CHECK-NEXT:    vmov.8 q0[10], r12
+; CHECK-NEXT:    vmov.8 q0[11], r4
+; CHECK-NEXT:    vmov.8 q0[12], lr
+; CHECK-NEXT:    vmov.8 q0[13], r3
+; CHECK-NEXT:    vmov.8 q0[14], r1
+; CHECK-NEXT:    vmov.8 q0[15], r2
+; CHECK-NEXT:    pop {r4, r5, r6, pc}
+entry:
+  %offs = load <16 x i8*>, <16 x i8*>* %offptr, align 4
+  %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %offs, i32 2, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+  ret <16 x i8> %gather
+}
+
+define arm_aapcs_vfpcc <8 x i16> @ptr_v8i8_sext16(<8 x i8*>* %offptr) {
+; CHECK-LABEL: ptr_v8i8_sext16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    vmov r1, s1
+; CHECK-NEXT:    vmov r5, s0
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov r4, s7
+; CHECK-NEXT:    ldrb.w r12, [r2]
+; CHECK-NEXT:    vmov r2, s5
+; CHECK-NEXT:    ldrb.w lr, [r1]
+; CHECK-NEXT:    vmov r1, s6
+; CHECK-NEXT:    ldrb r5, [r5]
+; CHECK-NEXT:    ldrb r3, [r3]
+; CHECK-NEXT:    vmov.16 q0[0], r5
+; CHECK-NEXT:    ldrb r0, [r0]
+; CHECK-NEXT:    vmov.16 q0[1], lr
+; CHECK-NEXT:    ldrb r4, [r4]
+; CHECK-NEXT:    vmov.16 q0[2], r3
+; CHECK-NEXT:    vmov.16 q0[3], r12
+; CHECK-NEXT:    vmov.16 q0[4], r0
+; CHECK-NEXT:    ldrb r2, [r2]
+; CHECK-NEXT:    ldrb r1, [r1]
+; CHECK-NEXT:    vmov.16 q0[5], r2
+; CHECK-NEXT:    vmov.16 q0[6], r1
+; CHECK-NEXT:    vmov.16 q0[7], r4
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+entry:
+  %offs = load <8 x i8*>, <8 x i8*>* %offptr, align 4
+  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*> %offs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
+  %ext = sext <8 x i8> %gather to <8 x i16>
+  ret <8 x i16> %ext
+}
+
+define arm_aapcs_vfpcc <8 x i16> @ptr_v8i8_zext16(<8 x i8*>* %offptr) {
+; CHECK-LABEL: ptr_v8i8_zext16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    vmov r1, s1
+; CHECK-NEXT:    vmov r5, s0
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov r4, s7
+; CHECK-NEXT:    ldrb.w r12, [r2]
+; CHECK-NEXT:    vmov r2, s5
+; CHECK-NEXT:    ldrb.w lr, [r1]
+; CHECK-NEXT:    vmov r1, s6
+; CHECK-NEXT:    ldrb r5, [r5]
+; CHECK-NEXT:    ldrb r3, [r3]
+; CHECK-NEXT:    vmov.16 q0[0], r5
+; CHECK-NEXT:    ldrb r0, [r0]
+; CHECK-NEXT:    vmov.16 q0[1], lr
+; CHECK-NEXT:    ldrb r4, [r4]
+; CHECK-NEXT:    vmov.16 q0[2], r3
+; CHECK-NEXT:    vmov.16 q0[3], r12
+; CHECK-NEXT:    vmov.16 q0[4], r0
+; CHECK-NEXT:    ldrb r2, [r2]
+; CHECK-NEXT:    ldrb r1, [r1]
+; CHECK-NEXT:    vmov.16 q0[5], r2
+; CHECK-NEXT:    vmov.16 q0[6], r1
+; CHECK-NEXT:    vmov.16 q0[7], r4
+; CHECK-NEXT:    vmovlb.u8 q0, q0
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+entry:
+  %offs = load <8 x i8*>, <8 x i8*>* %offptr, align 4
+  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*> %offs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
+  %ext = zext <8 x i8> %gather to <8 x i16>
+  ret <8 x i16> %ext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @ptr_v4i8_sext32(<4 x i8*>* %offptr) {
+; CHECK-LABEL: ptr_v4i8_sext32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    ldrb r0, [r0]
+; CHECK-NEXT:    ldrb r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    ldrb r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[1], r3
+; CHECK-NEXT:    ldrb r2, [r2]
+; CHECK-NEXT:    vmov.32 q0[2], r1
+; CHECK-NEXT:    vmov.32 q0[3], r2
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i8*>, <4 x i8*>* %offptr, align 4
+  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %offs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %ext = sext <4 x i8> %gather to <4 x i32>
+  ret <4 x i32> %ext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @ptr_v4i8_zext32(<4 x i8*>* %offptr) {
+; CHECK-LABEL: ptr_v4i8_zext32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    vmov.i32 q1, #0xff
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r1, s1
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r0, s3
+; CHECK-NEXT:    ldrb r2, [r2]
+; CHECK-NEXT:    ldrb r1, [r1]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    ldrb r3, [r3]
+; CHECK-NEXT:    vmov.32 q0[1], r1
+; CHECK-NEXT:    ldrb r0, [r0]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.32 q0[3], r0
+; CHECK-NEXT:    vand q0, q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i8*>, <4 x i8*>* %offptr, align 4
+  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %offs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %ext = zext <4 x i8> %gather to <4 x i32>
+  ret <4 x i32> %ext
+}
+
+define arm_aapcs_vfpcc <8 x i32> @ptr_v8i8_sext32(<8 x i8*>* %offptr) {
+; CHECK-LABEL: ptr_v8i8_sext32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov r5, s1
+; CHECK-NEXT:    vmov r4, s7
+; CHECK-NEXT:    ldrb.w r12, [r1]
+; CHECK-NEXT:    vmov r1, s5
+; CHECK-NEXT:    ldrb.w lr, [r2]
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    ldrb r3, [r3]
+; CHECK-NEXT:    ldrb r0, [r0]
+; CHECK-NEXT:    ldrb r5, [r5]
+; CHECK-NEXT:    vmov.32 q0[0], r3
+; CHECK-NEXT:    vmov.32 q1[0], r0
+; CHECK-NEXT:    ldrb r4, [r4]
+; CHECK-NEXT:    vmov.32 q0[1], r5
+; CHECK-NEXT:    vmov.32 q0[2], r12
+; CHECK-NEXT:    vmov.32 q0[3], lr
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    ldrb r1, [r1]
+; CHECK-NEXT:    ldrb r2, [r2]
+; CHECK-NEXT:    vmov.32 q1[1], r1
+; CHECK-NEXT:    vmov.32 q1[2], r2
+; CHECK-NEXT:    vmov.32 q1[3], r4
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+entry:
+  %offs = load <8 x i8*>, <8 x i8*>* %offptr, align 4
+  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*> %offs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
+  %ext = sext <8 x i8> %gather to <8 x i32>
+  ret <8 x i32> %ext
+}
+
+define arm_aapcs_vfpcc <8 x i32> @ptr_v8i8_zext32(<8 x i8*>* %offptr) {
+; CHECK-LABEL: ptr_v8i8_zext32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov r4, s0
+; CHECK-NEXT:    vmov r3, s5
+; CHECK-NEXT:    vmov r5, s1
+; CHECK-NEXT:    ldrb.w r12, [r1]
+; CHECK-NEXT:    vmov r1, s6
+; CHECK-NEXT:    ldrb.w lr, [r2]
+; CHECK-NEXT:    vmov r2, s7
+; CHECK-NEXT:    ldrb r0, [r0]
+; CHECK-NEXT:    vmov.i32 q1, #0xff
+; CHECK-NEXT:    ldrb r4, [r4]
+; CHECK-NEXT:    ldrb r3, [r3]
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    ldrb r5, [r5]
+; CHECK-NEXT:    vmov.32 q0[0], r4
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    vmov.32 q0[1], r5
+; CHECK-NEXT:    vmov.32 q0[2], r12
+; CHECK-NEXT:    vmov.32 q0[3], lr
+; CHECK-NEXT:    vand q0, q0, q1
+; CHECK-NEXT:    ldrb r1, [r1]
+; CHECK-NEXT:    ldrb r2, [r2]
+; CHECK-NEXT:    vmov.32 q2[2], r1
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vand q1, q2, q1
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+entry:
+  %offs = load <8 x i8*>, <8 x i8*>* %offptr, align 4
+  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*> %offs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
+  %ext = zext <8 x i8> %gather to <8 x i32>
+  ret <8 x i32> %ext
+}
+
+; loops
+
+define void @foo_ptr_p_int32_t(i32* %dest, i32** %src, i32 %n) {
+; CHECK-LABEL: foo_ptr_p_int32_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    bic r2, r2, #15
+; CHECK-NEXT:    cmp r2, #1
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    poplt {r7, pc}
+; CHECK-NEXT:    subs r2, #4
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    subs r1, #16
+; CHECK-NEXT:    add.w lr, r3, r2, lsr #2
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB22_1: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q0, [r1, #16]!
+; CHECK-NEXT:    vptt.i32 ne, q0, zr
+; CHECK-NEXT:    vldrwt.u32 q1, [q0]
+; CHECK-NEXT:    vstrwt.32 q1, [r0], #16
+; CHECK-NEXT:    le lr, .LBB22_1
+; CHECK-NEXT:  @ %bb.2: @ %for.end
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %and = and i32 %n, -16
+  %cmp11 = icmp sgt i32 %and, 0
+  br i1 %cmp11, label %vector.body, label %for.end
+
+vector.body:                                      ; preds = %entry, %vector.body
+  %index = phi i32 [ %index.next, %vector.body ], [ 0, %entry ]
+  %0 = getelementptr inbounds i32*, i32** %src, i32 %index
+  %1 = bitcast i32** %0 to <4 x i32*>*
+  %wide.load = load <4 x i32*>, <4 x i32*>* %1, align 4
+  %2 = icmp ne <4 x i32*> %wide.load, zeroinitializer
+  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %wide.load, i32 4, <4 x i1> %2, <4 x i32> undef)
+  %3 = getelementptr inbounds i32, i32* %dest, i32 %index
+  %4 = bitcast i32* %3 to <4 x i32>*
+  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %wide.masked.gather, <4 x i32>* %4, i32 4, <4 x i1> %2)
+  %index.next = add i32 %index, 4
+  %5 = icmp eq i32 %index.next, %and
+  br i1 %5, label %for.end, label %vector.body
+
+for.end:                                          ; preds = %vector.body, %entry
+  ret void
+}
+
+define void @foo_ptr_p_float(float* %dest, float** %src, i32 %n) {
+; CHECK-LABEL: foo_ptr_p_float:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    bic r2, r2, #15
+; CHECK-NEXT:    cmp r2, #1
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    poplt {r7, pc}
+; CHECK-NEXT:    subs r2, #4
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    subs r1, #16
+; CHECK-NEXT:    add.w lr, r3, r2, lsr #2
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB23_1: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q0, [r1, #16]!
+; CHECK-NEXT:    vptt.i32 ne, q0, zr
+; CHECK-NEXT:    vldrwt.u32 q1, [q0]
+; CHECK-NEXT:    vstrwt.32 q1, [r0], #16
+; CHECK-NEXT:    le lr, .LBB23_1
+; CHECK-NEXT:  @ %bb.2: @ %for.end
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %and = and i32 %n, -16
+  %cmp11 = icmp sgt i32 %and, 0
+  br i1 %cmp11, label %vector.body, label %for.end
+
+vector.body:                                      ; preds = %entry, %vector.body
+  %index = phi i32 [ %index.next, %vector.body ], [ 0, %entry ]
+  %0 = getelementptr inbounds float*, float** %src, i32 %index
+  %1 = bitcast float** %0 to <4 x float*>*
+  %wide.load = load <4 x float*>, <4 x float*>* %1, align 4
+  %2 = icmp ne <4 x float*> %wide.load, zeroinitializer
+  %3 = bitcast <4 x float*> %wide.load to <4 x i32*>
+  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %3, i32 4, <4 x i1> %2, <4 x i32> undef)
+  %4 = getelementptr inbounds float, float* %dest, i32 %index
+  %5 = bitcast float* %4 to <4 x i32>*
+  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %wide.masked.gather, <4 x i32>* %5, i32 4, <4 x i1> %2)
+  %index.next = add i32 %index, 4
+  %6 = icmp eq i32 %index.next, %and
+  br i1 %6, label %for.end, label %vector.body
+
+for.end:                                          ; preds = %vector.body, %entry
+  ret void
+}
+
+declare <2 x i32> @llvm.masked.gather.v2i32.v2p0i32(<2 x i32*>, i32, <2 x i1>, <2 x i32>)
+declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
+declare <8 x i32> @llvm.masked.gather.v8i32.v8p0i32(<8 x i32*>, i32, <8 x i1>, <8 x i32>)
+declare <16 x i32> @llvm.masked.gather.v16i32.v16p0i32(<16 x i32*>, i32, <16 x i1>, <16 x i32>)
+declare <2 x float> @llvm.masked.gather.v2f32.v2p0f32(<2 x float*>, i32, <2 x i1>, <2 x float>)
+declare <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*>, i32, <4 x i1>, <4 x float>)
+declare <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*>, i32, <8 x i1>, <8 x float>)
+declare <2 x i16> @llvm.masked.gather.v2i16.v2p0i16(<2 x i16*>, i32, <2 x i1>, <2 x i16>)
+declare <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*>, i32, <4 x i1>, <4 x i16>)
+declare <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*>, i32, <8 x i1>, <8 x i16>)
+declare <16 x i16> @llvm.masked.gather.v16i16.v16p0i16(<16 x i16*>, i32, <16 x i1>, <16 x i16>)
+declare <4 x half> @llvm.masked.gather.v4f16.v4p0f16(<4 x half*>, i32, <4 x i1>, <4 x half>)
+declare <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*>, i32, <8 x i1>, <8 x half>)
+declare <16 x half> @llvm.masked.gather.v16f16.v16p0f16(<16 x half*>, i32, <16 x i1>, <16 x half>)
+declare <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*>, i32, <4 x i1>, <4 x i8>)
+declare <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*>, i32, <8 x i1>, <8 x i8>)
+declare <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*>, i32, <16 x i1>, <16 x i8>)
+declare <32 x i8> @llvm.masked.gather.v32i8.v32p0i8(<32 x i8*>, i32, <32 x i1>, <32 x i8>)
+declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)
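
The two loop tests at the end of this file exercise the path where the mask is not a constant all-true vector, so the pass has to emit the predicated form of the gather, which ISel then places inside a VPT block (the vptt.i32/vldrwt.u32 pair in the CHECK lines). A sketch of the expected IR after the pass, with the intrinsic name and overload order assumed from IntrinsicsARM.td rather than quoted from this diff:

    ; The compare feeding the mask stays as-is and becomes the vptt.i32.
    %mask = icmp ne <4 x i32*> %wide.load, zeroinitializer
    ; Predicated MVE gather (sketch): selected as vldrwt.u32 q1, [q0].
    %wide.masked.gather = call <4 x i32> @llvm.arm.mve.vldr.gather.base.predicated.v4i32.v4p0i32.v4i1(<4 x i32*> %wide.load, i32 0, <4 x i1> %mask)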

diff  --git a/llvm/test/CodeGen/Thumb2/mve-gather-scatter-opt.ll b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-opt.ll
new file mode 100644
index 000000000000..70ad30aa60c8
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-opt.ll
@@ -0,0 +1,44 @@
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve.fp %s -o - 2>/dev/null | FileCheck --check-prefix NOGATSCAT %s
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=-mve -enable-arm-maskedgatscat %s -o - 2>/dev/null | FileCheck --check-prefix NOMVE %s
+
+define arm_aapcs_vfpcc <4 x i32> @unscaled_i32_i32(i8* %base, <4 x i32>* %offptr) {
+; NOGATSCAT-LABEL: unscaled_i32_i32:
+; NOGATSCAT:       @ %bb.0: @ %entry
+; NOGATSCAT-NEXT:    vldrw.u32	q0, [r1]
+; NOGATSCAT-NEXT:    vadd.i32	q0, q0, r0
+; NOGATSCAT-NEXT:    vmov r0, s0
+; NOGATSCAT-NEXT:    vmov r3, s1
+; NOGATSCAT-NEXT:    vmov r1, s2
+; NOGATSCAT-NEXT:    vmov r2, s3
+; NOGATSCAT-NEXT:    ldr r0, [r0]
+; NOGATSCAT-NEXT:    ldr r3, [r3]
+; NOGATSCAT-NEXT:    vmov.32 q0[0], r0
+; NOGATSCAT-NEXT:    ldr r1, [r1]
+; NOGATSCAT-NEXT:    vmov.32 q0[1], r3
+; NOGATSCAT-NEXT:    ldr r2, [r2]
+; NOGATSCAT-NEXT:    vmov.32 q0[2], r1
+; NOGATSCAT-NEXT:    vmov.32 q0[3], r2
+; NOGATSCAT-NEXT:    bx lr
+
+; NOMVE-LABEL: unscaled_i32_i32:
+; NOMVE:       @ %bb.0: @ %entry
+; NOMVE-NEXT:    .save	{r4, lr}
+; NOMVE-NEXT:    push	{r4, lr}
+; NOMVE-NEXT:    ldm.w	r1, {r2, r3, lr}
+; NOMVE-NEXT:    ldr	r4, [r1, #12]
+; NOMVE-NEXT:    ldr.w	r12, [r0, r2]
+; NOMVE-NEXT:    ldr	r1, [r0, r3]
+; NOMVE-NEXT:    ldr.w	r2, [r0, lr]
+; NOMVE-NEXT:    ldr	r3, [r0, r4]
+; NOMVE-NEXT:    mov	r0, r12
+; NOMVE-NEXT:    pop	{r4, pc}
+
+entry:
+  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
+  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs
+  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i32*>
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  ret <4 x i32> %gather
+}
+
+declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
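
These two configurations confirm the pass stays out of the way when it cannot help: without -enable-arm-maskedgatscat it is disabled, and without +mve there is no gather instruction to select, so the generic masked-gather lowering scalarises the call instead. With a constant all-true mask that scalarisation is branch-free; roughly, for the first two lanes (a sketch of the generic fallback, not text from this patch):

    %p0 = extractelement <4 x i32*> %ptrs, i32 0
    %l0 = load i32, i32* %p0, align 4
    %v0 = insertelement <4 x i32> undef, i32 %l0, i32 0
    %p1 = extractelement <4 x i32*> %ptrs, i32 1
    %l1 = load i32, i32* %p1, align 4
    %v1 = insertelement <4 x i32> %v0, i32 %l1, i32 1
    ; Lanes 2 and 3 follow the same pattern, giving the four scalar loads
    ; visible in both the NOGATSCAT and NOMVE check lines above.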


        

