[llvm] bde2432 - Revert "[Asan] Provide TTI hook to provide memory reference information of target intrinsics. (#97070)"
Jeremy Morse via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 8 04:26:50 PDT 2024
Author: Jeremy Morse
Date: 2024-08-08T12:26:05+01:00
New Revision: bde243259b3cc4404a82333a31a5a13c83ec6cb7
URL: https://github.com/llvm/llvm-project/commit/bde243259b3cc4404a82333a31a5a13c83ec6cb7
DIFF: https://github.com/llvm/llvm-project/commit/bde243259b3cc4404a82333a31a5a13c83ec6cb7.diff
LOG: Revert "[Asan] Provide TTI hook to provide memory reference infromation of target intrinsics. (#97070)"
This reverts commit e8ad87c7d06afe8f5dde2e4c7f13c314cb3a99e9.
This reverts commit d3c9bb0cf811424dcb8c848cf06773dbdde19965.
A few buildbots trip up on asan-rvv-intrinsics.ll, so I've also reverted
the follow-up commit d3c9bb0cf8.
https://lab.llvm.org/buildbot/#/builders/46/builds/2895
Added:
Modified:
llvm/include/llvm/Analysis/TargetTransformInfo.h
llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
llvm/lib/Analysis/TargetTransformInfo.cpp
llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp
llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.h
llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
Removed:
llvm/include/llvm/Analysis/MemoryRefInfo.h
llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll
################################################################################
diff --git a/llvm/include/llvm/Analysis/MemoryRefInfo.h b/llvm/include/llvm/Analysis/MemoryRefInfo.h
deleted file mode 100644
index 094141e22aa67b..00000000000000
--- a/llvm/include/llvm/Analysis/MemoryRefInfo.h
+++ /dev/null
@@ -1,53 +0,0 @@
-//===--------- Definition of the MemoryRefInfo class -*- C++ -*------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines MemoryRefInfo class that is used when getting
-// the information of a memory reference instruction.
-//
-//===----------------------------------------------------------------------===//
-#ifndef LLVM_ANALYSIS_MEMORYREFINFO_H
-#define LLVM_ANALYSIS_MEMORYREFINFO_H
-
-#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/Instruction.h"
-#include "llvm/Support/TypeSize.h"
-
-namespace llvm {
-class MemoryRefInfo {
-public:
- Use *PtrUse = nullptr;
- bool IsWrite;
- Type *OpType;
- TypeSize TypeStoreSize = TypeSize::getFixed(0);
- MaybeAlign Alignment;
- // The mask Value, if we're looking at a masked load/store.
- Value *MaybeMask;
- // The EVL Value, if we're looking at a vp intrinsic.
- Value *MaybeEVL;
- // The Stride Value, if we're looking at a strided load/store.
- Value *MaybeStride;
-
- MemoryRefInfo() = default;
- MemoryRefInfo(Instruction *I, unsigned OperandNo, bool IsWrite,
- class Type *OpType, MaybeAlign Alignment,
- Value *MaybeMask = nullptr, Value *MaybeEVL = nullptr,
- Value *MaybeStride = nullptr)
- : IsWrite(IsWrite), OpType(OpType), Alignment(Alignment),
- MaybeMask(MaybeMask), MaybeEVL(MaybeEVL), MaybeStride(MaybeStride) {
- const DataLayout &DL = I->getDataLayout();
- TypeStoreSize = DL.getTypeStoreSizeInBits(OpType);
- PtrUse = &I->getOperandUse(OperandNo);
- }
-
- Instruction *getInsn() { return cast<Instruction>(PtrUse->getUser()); }
- Value *getPtr() { return PtrUse->get(); }
- operator bool() { return PtrUse != nullptr; }
-};
-
-} // namespace llvm
-#endif
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index be01fdfdde319a..38e8b9da213974 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -23,7 +23,6 @@
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallBitVector.h"
-#include "llvm/Analysis/MemoryRefInfo.h"
#include "llvm/IR/FMF.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/PassManager.h"
@@ -962,10 +961,6 @@ class TargetTransformInfo {
MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
bool IsZeroCmp) const;
- // Add MemoryRefInfo of Intrinsic \p II into array \p Interesting.
- bool getMemoryRefInfo(SmallVectorImpl<MemoryRefInfo> &Interesting,
- IntrinsicInst *II) const;
-
/// Should the Select Optimization pass be enabled and ran.
bool enableSelectOptimize() const;
@@ -1956,8 +1951,6 @@ class TargetTransformInfo::Concept {
virtual bool enableAggressiveInterleaving(bool LoopHasReductions) = 0;
virtual MemCmpExpansionOptions
enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const = 0;
- virtual bool getMemoryRefInfo(SmallVectorImpl<MemoryRefInfo> &Interesting,
- IntrinsicInst *II) const = 0;
virtual bool enableSelectOptimize() = 0;
virtual bool shouldTreatInstructionLikeSelect(const Instruction *I) = 0;
virtual bool enableInterleavedAccessVectorization() = 0;
@@ -2517,12 +2510,6 @@ class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
bool IsZeroCmp) const override {
return Impl.enableMemCmpExpansion(OptSize, IsZeroCmp);
}
-
- bool getMemoryRefInfo(SmallVectorImpl<MemoryRefInfo> &Interesting,
- IntrinsicInst *II) const override {
- return Impl.getMemoryRefInfo(Interesting, II);
- }
-
bool enableSelectOptimize() override {
return Impl.enableSelectOptimize();
}
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index 6bcae9c9e4eaa6..d208a710bb27fd 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -396,11 +396,6 @@ class TargetTransformInfoImplBase {
return {};
}
- bool getMemoryRefInfo(SmallVectorImpl<MemoryRefInfo> &Interesting,
- IntrinsicInst *II) const {
- return false;
- }
-
bool enableSelectOptimize() const { return true; }
bool shouldTreatInstructionLikeSelect(const Instruction *I) {
diff --git a/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h b/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
index f7bd36c2def030..9fe2716220e83d 100644
--- a/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
+++ b/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
@@ -14,7 +14,6 @@
#define LLVM_TRANSFORMS_INSTRUMENTATION_ADDRESSSANITIZERCOMMON_H
#include "llvm/Analysis/CFG.h"
-#include "llvm/Analysis/MemoryRefInfo.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
@@ -23,6 +22,37 @@
namespace llvm {
+class InterestingMemoryOperand {
+public:
+ Use *PtrUse;
+ bool IsWrite;
+ Type *OpType;
+ TypeSize TypeStoreSize = TypeSize::getFixed(0);
+ MaybeAlign Alignment;
+ // The mask Value, if we're looking at a masked load/store.
+ Value *MaybeMask;
+ // The EVL Value, if we're looking at a vp intrinsic.
+ Value *MaybeEVL;
+ // The Stride Value, if we're looking at a strided load/store.
+ Value *MaybeStride;
+
+ InterestingMemoryOperand(Instruction *I, unsigned OperandNo, bool IsWrite,
+ class Type *OpType, MaybeAlign Alignment,
+ Value *MaybeMask = nullptr,
+ Value *MaybeEVL = nullptr,
+ Value *MaybeStride = nullptr)
+ : IsWrite(IsWrite), OpType(OpType), Alignment(Alignment),
+ MaybeMask(MaybeMask), MaybeEVL(MaybeEVL), MaybeStride(MaybeStride) {
+ const DataLayout &DL = I->getDataLayout();
+ TypeStoreSize = DL.getTypeStoreSizeInBits(OpType);
+ PtrUse = &I->getOperandUse(OperandNo);
+ }
+
+ Instruction *getInsn() { return cast<Instruction>(PtrUse->getUser()); }
+
+ Value *getPtr() { return PtrUse->get(); }
+};
+
// Get AddressSanitizer parameters.
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize,
bool IsKasan, uint64_t *ShadowBase,
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index e04319f37f4535..dcde78925bfa98 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -626,11 +626,6 @@ TargetTransformInfo::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
return TTIImpl->enableMemCmpExpansion(OptSize, IsZeroCmp);
}
-bool TargetTransformInfo::getMemoryRefInfo(
- SmallVectorImpl<MemoryRefInfo> &Interesting, IntrinsicInst *II) const {
- return TTIImpl->getMemoryRefInfo(Interesting, II);
-}
-
bool TargetTransformInfo::enableSelectOptimize() const {
return TTIImpl->enableSelectOptimize();
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp
index 9bdea0bba8f1a8..593fca5bc3ed68 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp
@@ -181,7 +181,7 @@ void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns,
void getInterestingMemoryOperands(
Module &M, Instruction *I,
- SmallVectorImpl<MemoryRefInfo> &Interesting) {
+ SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
const DataLayout &DL = M.getDataLayout();
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.h b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.h
index 4f1874e74bf915..64d78c4aeb6925 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.h
@@ -52,7 +52,7 @@ void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns,
/// that needs to be instrumented
void getInterestingMemoryOperands(
Module &M, Instruction *I,
- SmallVectorImpl<MemoryRefInfo> &Interesting);
+ SmallVectorImpl<InterestingMemoryOperand> &Interesting);
} // end namespace AMDGPU
} // end namespace llvm
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index fb2b7035c7fc2b..4cd904c039a984 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -14,7 +14,6 @@
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/Instructions.h"
-#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/PatternMatch.h"
#include <cmath>
#include <optional>
@@ -37,83 +36,6 @@ static cl::opt<unsigned> SLPMaxVF(
"exclusively by SLP vectorizer."),
cl::Hidden);
-bool RISCVTTIImpl::getMemoryRefInfo(SmallVectorImpl<MemoryRefInfo> &Interesting,
- IntrinsicInst *II) const {
- const DataLayout &DL = getDataLayout();
- Intrinsic::ID IID = II->getIntrinsicID();
- LLVMContext &C = II->getContext();
- bool HasMask = false;
-
- switch (IID) {
- case Intrinsic::riscv_vle_mask:
- case Intrinsic::riscv_vse_mask:
- HasMask = true;
- [[fallthrough]];
- case Intrinsic::riscv_vle:
- case Intrinsic::riscv_vse: {
- // Intrinsic interface:
- // riscv_vle(merge, ptr, vl)
- // riscv_vle_mask(merge, ptr, mask, vl, policy)
- // riscv_vse(val, ptr, vl)
- // riscv_vse_mask(val, ptr, mask, vl, policy)
- bool IsWrite = II->getType()->isVoidTy();
- Type *Ty = IsWrite ? II->getArgOperand(0)->getType() : II->getType();
- const auto *RVVIInfo = RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IID);
- unsigned VLIndex = RVVIInfo->VLOperand;
- unsigned PtrOperandNo = VLIndex - 1 - HasMask;
- MaybeAlign Alignment =
- II->getArgOperand(PtrOperandNo)->getPointerAlignment(DL);
- Type *MaskType = Ty->getWithNewType(Type::getInt1Ty(C));
- Value *Mask = ConstantInt::getTrue(MaskType);
- if (HasMask)
- Mask = II->getArgOperand(VLIndex - 1);
- Value *EVL = II->getArgOperand(VLIndex);
- Interesting.emplace_back(II, PtrOperandNo, IsWrite, Ty, Alignment, Mask,
- EVL);
- return true;
- }
- case Intrinsic::riscv_vlse_mask:
- case Intrinsic::riscv_vsse_mask:
- HasMask = true;
- [[fallthrough]];
- case Intrinsic::riscv_vlse:
- case Intrinsic::riscv_vsse: {
- // Intrinsic interface:
- // riscv_vlse(merge, ptr, stride, vl)
- // riscv_vlse_mask(merge, ptr, stride, mask, vl, policy)
- // riscv_vsse(val, ptr, stride, vl)
- // riscv_vsse_mask(val, ptr, stride, mask, vl, policy)
- bool IsWrite = II->getType()->isVoidTy();
- Type *Ty = IsWrite ? II->getArgOperand(0)->getType() : II->getType();
- const auto *RVVIInfo = RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IID);
- unsigned VLIndex = RVVIInfo->VLOperand;
- unsigned PtrOperandNo = VLIndex - 2 - HasMask;
- MaybeAlign Alignment =
- II->getArgOperand(PtrOperandNo)->getPointerAlignment(DL);
-
- Value *Stride = II->getArgOperand(PtrOperandNo + 1);
- // Use the pointer alignment as the element alignment if the stride is a
- // multiple of the pointer alignment. Otherwise, the element alignment
- // should be the greatest common divisor of pointer alignment and stride.
- // For simplicity, just consider unalignment for elements.
- unsigned PointerAlign = Alignment.valueOrOne().value();
- if (!isa<ConstantInt>(Stride) ||
- cast<ConstantInt>(Stride)->getZExtValue() % PointerAlign != 0)
- Alignment = Align(1);
-
- Type *MaskType = Ty->getWithNewType(Type::getInt1Ty(C));
- Value *Mask = ConstantInt::getTrue(MaskType);
- if (HasMask)
- Mask = II->getArgOperand(VLIndex - 1);
- Value *EVL = II->getArgOperand(VLIndex);
- Interesting.emplace_back(II, PtrOperandNo, IsWrite, Ty, Alignment, Mask,
- EVL, Stride);
- return true;
- }
- }
- return false;
-}
-
InstructionCost
RISCVTTIImpl::getRISCVInstructionCost(ArrayRef<unsigned> OpCodes, MVT VT,
TTI::TargetCostKind CostKind) {
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
index e9a4721c378900..9c37a4f6ec2d04 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
@@ -60,9 +60,6 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
: BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl(F)),
TLI(ST->getTargetLowering()) {}
- bool getMemoryRefInfo(SmallVectorImpl<MemoryRefInfo> &Interesting,
- IntrinsicInst *II) const;
-
bool areInlineCompatible(const Function *Caller,
const Function *Callee) const;
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 5fc4d826e00158..0ff5e9b815adfc 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -29,7 +29,6 @@
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/StackSafetyAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
-#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Demangle/Demangle.h"
@@ -755,13 +754,12 @@ struct AddressSanitizer {
bool isInterestingAlloca(const AllocaInst &AI);
bool ignoreAccess(Instruction *Inst, Value *Ptr);
- void getMemoryRefInfos(Instruction *I,
- SmallVectorImpl<MemoryRefInfo> &Interesting,
- const TargetTransformInfo *TTI);
+ void getInterestingMemoryOperands(
+ Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);
- void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, MemoryRefInfo &O,
- bool UseCalls, const DataLayout &DL,
- RuntimeCallInserter &RTCI);
+ void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
+ InterestingMemoryOperand &O, bool UseCalls,
+ const DataLayout &DL, RuntimeCallInserter &RTCI);
void instrumentPointerComparisonOrSubtraction(Instruction *I,
RuntimeCallInserter &RTCI);
void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
@@ -797,8 +795,7 @@ struct AddressSanitizer {
void instrumentMemIntrinsic(MemIntrinsic *MI, RuntimeCallInserter &RTCI);
Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
bool suppressInstrumentationSiteForDebug(int &Instrumented);
- bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI,
- const TargetTransformInfo *TTI);
+ bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI);
bool maybeInsertAsanInitAtFunctionEntry(Function &F);
bool maybeInsertDynamicShadowAtFunctionEntry(Function &F);
void markEscapedLocalAllocas(Function &F);
@@ -1267,8 +1264,7 @@ PreservedAnalyses AddressSanitizerPass::run(Module &M,
Options.MaxInlinePoisoningSize, Options.CompileKernel, Options.Recover,
Options.UseAfterScope, Options.UseAfterReturn);
const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
- const TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
- Modified |= FunctionSanitizer.instrumentFunction(F, &TLI, &TTI);
+ Modified |= FunctionSanitizer.instrumentFunction(F, &TLI);
}
Modified |= ModuleSanitizer.instrumentModule(M);
if (!Modified)
@@ -1405,9 +1401,8 @@ bool AddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
return false;
}
-void AddressSanitizer::getMemoryRefInfos(
- Instruction *I, SmallVectorImpl<MemoryRefInfo> &Interesting,
- const TargetTransformInfo *TTI) {
+void AddressSanitizer::getInterestingMemoryOperands(
+ Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
// Do not instrument the load fetching the dynamic shadow address.
if (LocalDynamicShadow == I)
return;
@@ -1525,9 +1520,6 @@ void AddressSanitizer::getMemoryRefInfos(
break;
}
default:
- if (auto *II = dyn_cast<IntrinsicInst>(I))
- if (TTI->getMemoryRefInfo(Interesting, II))
- return;
for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
ignoreAccess(I, CI->getArgOperand(ArgNo)))
@@ -1690,7 +1682,7 @@ void AddressSanitizer::instrumentMaskedLoadOrStore(
}
void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
- MemoryRefInfo &O, bool UseCalls,
+ InterestingMemoryOperand &O, bool UseCalls,
const DataLayout &DL,
RuntimeCallInserter &RTCI) {
Value *Addr = O.getPtr();
@@ -2948,8 +2940,7 @@ bool AddressSanitizer::suppressInstrumentationSiteForDebug(int &Instrumented) {
}
bool AddressSanitizer::instrumentFunction(Function &F,
- const TargetLibraryInfo *TLI,
- const TargetTransformInfo *TTI) {
+ const TargetLibraryInfo *TLI) {
if (F.empty())
return false;
if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
@@ -2989,7 +2980,7 @@ bool AddressSanitizer::instrumentFunction(Function &F,
// We want to instrument every address only once per basic block (unless there
// are calls between uses).
SmallPtrSet<Value *, 16> TempsToInstrument;
- SmallVector<MemoryRefInfo, 16> OperandsToInstrument;
+ SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
SmallVector<Instruction *, 8> NoReturnCalls;
SmallVector<BasicBlock *, 16> AllBlocks;
@@ -3005,8 +2996,8 @@ bool AddressSanitizer::instrumentFunction(Function &F,
// Skip instructions inserted by another instrumentation.
if (Inst.hasMetadata(LLVMContext::MD_nosanitize))
continue;
- SmallVector<MemoryRefInfo, 1> InterestingOperands;
- getMemoryRefInfos(&Inst, InterestingOperands, TTI);
+ SmallVector<InterestingMemoryOperand, 1> InterestingOperands;
+ getInterestingMemoryOperands(&Inst, InterestingOperands);
if (!InterestingOperands.empty()) {
for (auto &Operand : InterestingOperands) {
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index 83821c8542b942..812874ff3c1739 100644
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -339,14 +339,16 @@ class HWAddressSanitizer {
LoopInfo *LI);
bool ignoreMemIntrinsic(OptimizationRemarkEmitter &ORE, MemIntrinsic *MI);
void instrumentMemIntrinsic(MemIntrinsic *MI);
- bool instrumentMemAccess(MemoryRefInfo &O, DomTreeUpdater &DTU, LoopInfo *LI);
+ bool instrumentMemAccess(InterestingMemoryOperand &O, DomTreeUpdater &DTU,
+ LoopInfo *LI);
bool ignoreAccessWithoutRemark(Instruction *Inst, Value *Ptr);
bool ignoreAccess(OptimizationRemarkEmitter &ORE, Instruction *Inst,
Value *Ptr);
- void getMemoryRefInfos(OptimizationRemarkEmitter &ORE, Instruction *I,
- const TargetLibraryInfo &TLI,
- SmallVectorImpl<MemoryRefInfo> &Interesting);
+ void getInterestingMemoryOperands(
+ OptimizationRemarkEmitter &ORE, Instruction *I,
+ const TargetLibraryInfo &TLI,
+ SmallVectorImpl<InterestingMemoryOperand> &Interesting);
void tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
@@ -812,9 +814,10 @@ bool HWAddressSanitizer::ignoreAccess(OptimizationRemarkEmitter &ORE,
return Ignored;
}
-void HWAddressSanitizer::getMemoryRefInfos(
+void HWAddressSanitizer::getInterestingMemoryOperands(
OptimizationRemarkEmitter &ORE, Instruction *I,
- const TargetLibraryInfo &TLI, SmallVectorImpl<MemoryRefInfo> &Interesting) {
+ const TargetLibraryInfo &TLI,
+ SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
// Skip memory accesses inserted by another instrumentation.
if (I->hasMetadata(LLVMContext::MD_nosanitize))
return;
@@ -1085,7 +1088,7 @@ void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
MI->eraseFromParent();
}
-bool HWAddressSanitizer::instrumentMemAccess(MemoryRefInfo &O,
+bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O,
DomTreeUpdater &DTU,
LoopInfo *LI) {
Value *Addr = O.getPtr();
@@ -1574,7 +1577,7 @@ void HWAddressSanitizer::sanitizeFunction(Function &F,
LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");
- SmallVector<MemoryRefInfo, 16> OperandsToInstrument;
+ SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
SmallVector<Instruction *, 8> LandingPadVec;
const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
@@ -1588,7 +1591,7 @@ void HWAddressSanitizer::sanitizeFunction(Function &F,
if (InstrumentLandingPads && isa<LandingPadInst>(Inst))
LandingPadVec.push_back(&Inst);
- getMemoryRefInfos(ORE, &Inst, TLI, OperandsToInstrument);
+ getInterestingMemoryOperands(ORE, &Inst, TLI, OperandsToInstrument);
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst))
if (!ignoreMemIntrinsic(ORE, MI))
diff --git a/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll b/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll
deleted file mode 100644
index 91e43c57a846df..00000000000000
--- a/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll
+++ /dev/null
@@ -1,2273 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -mtriple=riscv64 -mattr=+v -passes=asan \
-; RUN: -asan-instrumentation-with-call-threshold=0 -S | FileCheck %s
-
-declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- i64)
-define <vscale x 1 x i32> @intrinsic_vle_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* align 4 %0, i64 %1) sanitize_address {
-; CHECK-LABEL: @intrinsic_vle_v_nxv1i32_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i64 [[TMP1:%.*]], 0
-; CHECK-NEXT: br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP12:%.*]]
-; CHECK: 4:
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 [[TMP5]])
-; CHECK-NEXT: br label [[DOTSPLIT:%.*]]
-; CHECK: .split:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP4]] ], [ [[IV_NEXT:%.*]], [[TMP11:%.*]] ]
-; CHECK-NEXT: [[TMP7:%.*]] = extractelement <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), i64 [[IV]]
-; CHECK-NEXT: br i1 [[TMP7]], label [[TMP8:%.*]], label [[TMP11]]
-; CHECK: 8:
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr <vscale x 1 x i32>, ptr [[TMP0:%.*]], i64 0, i64 [[IV]]
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[TMP9]] to i64
-; CHECK-NEXT: call void @__asan_load4(i64 [[TMP10]])
-; CHECK-NEXT: br label [[TMP11]]
-; CHECK: 11:
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP6]]
-; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
-; CHECK: .split.split:
-; CHECK-NEXT: br label [[TMP12]]
-; CHECK: 12:
-; CHECK-NEXT: [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[TMP0]], i64 [[TMP1]])
-; CHECK-NEXT: ret <vscale x 1 x i32> [[A]]
-;
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32>* %0,
- i64 %1)
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i1>,
- i64,
- i64)
-define <vscale x 1 x i32> @intrinsic_vle_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* align 4 %1, <vscale x 1 x i1> %2, i64 %3) sanitize_address {
-; CHECK-LABEL: @intrinsic_vle_mask_v_nxv1i32_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3:%.*]], 0
-; CHECK-NEXT: br i1 [[TMP5]], label [[TMP6:%.*]], label [[TMP14:%.*]]
-; CHECK: 6:
-; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP3]], i64 [[TMP7]])
-; CHECK-NEXT: br label [[DOTSPLIT:%.*]]
-; CHECK: .split:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP6]] ], [ [[IV_NEXT:%.*]], [[TMP13:%.*]] ]
-; CHECK-NEXT: [[TMP9:%.*]] = extractelement <vscale x 1 x i1> [[TMP2:%.*]], i64 [[IV]]
-; CHECK-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP13]]
-; CHECK: 10:
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr <vscale x 1 x i32>, ptr [[TMP1:%.*]], i64 0, i64 [[IV]]
-; CHECK-NEXT: [[TMP12:%.*]] = ptrtoint ptr [[TMP11]] to i64
-; CHECK-NEXT: call void @__asan_load4(i64 [[TMP12]])
-; CHECK-NEXT: br label [[TMP13]]
-; CHECK: 13:
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP8]]
-; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
-; CHECK: .split.split:
-; CHECK-NEXT: br label [[TMP14]]
-; CHECK: 14:
-; CHECK-NEXT: [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1]], <vscale x 1 x i1> [[TMP2]], i64 [[TMP3]], i64 1)
-; CHECK-NEXT: ret <vscale x 1 x i32> [[A]]
-;
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
- ret <vscale x 1 x i32> %a
-}
-
-declare void @llvm.riscv.vse.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- i64)
-define void @intrinsic_vse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* align 4 %1, i64 %2) sanitize_address {
-; CHECK-LABEL: @intrinsic_vse_v_nxv1i32_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i64 [[TMP2:%.*]], 0
-; CHECK-NEXT: br i1 [[TMP4]], label [[TMP5:%.*]], label [[TMP13:%.*]]
-; CHECK: 5:
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP2]], i64 [[TMP6]])
-; CHECK-NEXT: br label [[DOTSPLIT:%.*]]
-; CHECK: .split:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP5]] ], [ [[IV_NEXT:%.*]], [[TMP12:%.*]] ]
-; CHECK-NEXT: [[TMP8:%.*]] = extractelement <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), i64 [[IV]]
-; CHECK-NEXT: br i1 [[TMP8]], label [[TMP9:%.*]], label [[TMP12]]
-; CHECK: 9:
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr <vscale x 1 x i32>, ptr [[TMP1:%.*]], i64 0, i64 [[IV]]
-; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[TMP10]] to i64
-; CHECK-NEXT: call void @__asan_store4(i64 [[TMP11]])
-; CHECK-NEXT: br label [[TMP12]]
-; CHECK: 12:
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP7]]
-; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
-; CHECK: .split.split:
-; CHECK-NEXT: br label [[TMP13]]
-; CHECK: 13:
-; CHECK-NEXT: call void @llvm.riscv.vse.nxv1i32.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1]], i64 [[TMP2]])
-; CHECK-NEXT: ret void
-;
-entry:
- call void @llvm.riscv.vse.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- i64 %2)
- ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i1>,
- i64)
-define void @intrinsic_vse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* align 4 %1, <vscale x 1 x i1> %2, i64 %3) sanitize_address {
-; CHECK-LABEL: @intrinsic_vse_mask_v_nxv1i32_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3:%.*]], 0
-; CHECK-NEXT: br i1 [[TMP5]], label [[TMP6:%.*]], label [[TMP14:%.*]]
-; CHECK: 6:
-; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP3]], i64 [[TMP7]])
-; CHECK-NEXT: br label [[DOTSPLIT:%.*]]
-; CHECK: .split:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP6]] ], [ [[IV_NEXT:%.*]], [[TMP13:%.*]] ]
-; CHECK-NEXT: [[TMP9:%.*]] = extractelement <vscale x 1 x i1> [[TMP2:%.*]], i64 [[IV]]
-; CHECK-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP13]]
-; CHECK: 10:
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr <vscale x 1 x i32>, ptr [[TMP1:%.*]], i64 0, i64 [[IV]]
-; CHECK-NEXT: [[TMP12:%.*]] = ptrtoint ptr [[TMP11]] to i64
-; CHECK-NEXT: call void @__asan_store4(i64 [[TMP12]])
-; CHECK-NEXT: br label [[TMP13]]
-; CHECK: 13:
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP8]]
-; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
-; CHECK: .split.split:
-; CHECK-NEXT: br label [[TMP14]]
-; CHECK: 14:
-; CHECK-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1]], <vscale x 1 x i1> [[TMP2]], i64 [[TMP3]])
-; CHECK-NEXT: ret void
-;
-entry:
- call void @llvm.riscv.vse.mask.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i1> %2,
- i64 %3)
- ret void
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vlseg2_nxv1i32(ptr align 4 %base, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vlseg2_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vlseg2_mask_nxv1i32(ptr align 4 %base, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vlseg2_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg2.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.mask.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
- ret <vscale x 1 x i32> %1
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg3.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg3.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vlseg3_nxv1i32(ptr align 4 %base, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vlseg3_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg3.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg3.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vlseg3_mask_nxv1i32(ptr align 4 %base, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vlseg3_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg3.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg3.mask.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
- ret <vscale x 1 x i32> %1
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg4.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg4.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vlseg4_nxv1i32(ptr align 4 %base, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vlseg4_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg4.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg4.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vlseg4_mask_nxv1i32(ptr align 4 %base, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vlseg4_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg4.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg4.mask.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
- ret <vscale x 1 x i32> %1
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg5.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg5.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vlseg5_nxv1i32(ptr align 4 %base, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vlseg5_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg5.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg5.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vlseg5_mask_nxv1i32(ptr align 4 %base, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vlseg5_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg5.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg5.mask.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
- ret <vscale x 1 x i32> %1
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg6.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg6.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vlseg6_nxv1i32(ptr align 4 %base, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vlseg6_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg6.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg6.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vlseg6_mask_nxv1i32(ptr align 4 %base, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vlseg6_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg6.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg6.mask.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
- ret <vscale x 1 x i32> %1
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg7.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg7.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vlseg7_nxv1i32(ptr align 4 %base, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vlseg7_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg7.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg7.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vlseg7_mask_nxv1i32(ptr align 4 %base, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vlseg7_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg7.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg7.mask.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
- ret <vscale x 1 x i32> %1
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg8.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg8.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vlseg8_nxv1i32(ptr align 4 %base, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vlseg8_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg8.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg8.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vlseg8_mask_nxv1i32(ptr align 4 %base, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vlseg8_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg8.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg8.mask.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
- ret <vscale x 1 x i32> %1
-}
-
-declare void @llvm.riscv.vsseg2.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i64)
-declare void @llvm.riscv.vsseg2.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i64)
-
-define void @test_vsseg2_nxv1i32(<vscale x 1 x i32> %val, ptr align 4 %base, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsseg2_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsseg2.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsseg2.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %vl)
- ret void
-}
-
-define void @test_vsseg2_mask_nxv1i32(<vscale x 1 x i32> %val, ptr align 4 %base, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsseg2_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsseg2.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}
-
-declare void @llvm.riscv.vsseg3.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i64)
-declare void @llvm.riscv.vsseg3.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i64)
-
-define void @test_vsseg3_nxv1i32(<vscale x 1 x i32> %val, ptr align 4 %base, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsseg3_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsseg3.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsseg3.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %vl)
- ret void
-}
-
-define void @test_vsseg3_mask_nxv1i32(<vscale x 1 x i32> %val, ptr align 4 %base, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsseg3_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsseg3.mask.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsseg3.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}
-
-declare void @llvm.riscv.vsseg4.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i64)
-declare void @llvm.riscv.vsseg4.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i64)
-
-define void @test_vsseg4_nxv1i32(<vscale x 1 x i32> %val, ptr align 4 %base, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsseg4_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsseg4.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsseg4.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %vl)
- ret void
-}
-
-define void @test_vsseg4_mask_nxv1i32(<vscale x 1 x i32> %val, ptr align 4 %base, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsseg4_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsseg4.mask.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsseg4.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}
-
-declare void @llvm.riscv.vsseg5.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i64)
-declare void @llvm.riscv.vsseg5.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i64)
-
-define void @test_vsseg5_nxv1i32(<vscale x 1 x i32> %val, ptr align 4 %base, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsseg5_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsseg5.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsseg5.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %vl)
- ret void
-}
-
-define void @test_vsseg5_mask_nxv1i32(<vscale x 1 x i32> %val, ptr align 4 %base, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsseg5_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsseg5.mask.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsseg5.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}
-
-declare void @llvm.riscv.vsseg6.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i64)
-declare void @llvm.riscv.vsseg6.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i64)
-
-define void @test_vsseg6_nxv1i32(<vscale x 1 x i32> %val, ptr align 4 %base, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsseg6_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsseg6.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsseg6.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %vl)
- ret void
-}
-
-define void @test_vsseg6_mask_nxv1i32(<vscale x 1 x i32> %val, ptr align 4 %base, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsseg6_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsseg6.mask.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsseg6.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}
-
-declare void @llvm.riscv.vsseg7.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i64)
-declare void @llvm.riscv.vsseg7.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i64)
-
-define void @test_vsseg7_nxv1i32(<vscale x 1 x i32> %val, ptr align 4 %base, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsseg7_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsseg7.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsseg7.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %vl)
- ret void
-}
-
-define void @test_vsseg7_mask_nxv1i32(<vscale x 1 x i32> %val, ptr align 4 %base, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsseg7_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsseg7.mask.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsseg7.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}
-
-declare void @llvm.riscv.vsseg8.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i64)
-declare void @llvm.riscv.vsseg8.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i64)
-
-define void @test_vsseg8_nxv1i32(<vscale x 1 x i32> %val, ptr align 4 %base, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsseg8_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsseg8.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsseg8.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %vl)
- ret void
-}
-
-define void @test_vsseg8_mask_nxv1i32(<vscale x 1 x i32> %val, ptr align 4 %base, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsseg8_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsseg8.mask.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsseg8.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}
-
-; Test stride load
-declare <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vlse_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* align 4 %0, i64 %1, i64 %2) sanitize_address {
-; CHECK-LABEL: @intrinsic_vlse_v_nxv1i32_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i64 [[TMP2:%.*]], 0
-; CHECK-NEXT: br i1 [[TMP4]], label [[TMP5:%.*]], label [[TMP14:%.*]]
-; CHECK: 5:
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP2]], i64 [[TMP6]])
-; CHECK-NEXT: br label [[DOTSPLIT:%.*]]
-; CHECK: .split:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP5]] ], [ [[IV_NEXT:%.*]], [[TMP13:%.*]] ]
-; CHECK-NEXT: [[TMP8:%.*]] = extractelement <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), i64 [[IV]]
-; CHECK-NEXT: br i1 [[TMP8]], label [[TMP9:%.*]], label [[TMP13]]
-; CHECK: 9:
-; CHECK-NEXT: [[TMP10:%.*]] = mul i64 [[IV]], [[TMP1:%.*]]
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP0:%.*]], i64 [[TMP10]]
-; CHECK-NEXT: [[TMP12:%.*]] = ptrtoint ptr [[TMP11]] to i64
-; CHECK-NEXT: call void @__asan_loadN(i64 [[TMP12]], i64 4)
-; CHECK-NEXT: br label [[TMP13]]
-; CHECK: 13:
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP7]]
-; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
-; CHECK: .split.split:
-; CHECK-NEXT: br label [[TMP14]]
-; CHECK: 14:
-; CHECK-NEXT: [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[TMP0]], i64 [[TMP1]], i64 [[TMP2]])
-; CHECK-NEXT: ret <vscale x 1 x i32> [[A]]
-;
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32>* %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- i64,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vlse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) sanitize_address {
-; CHECK-LABEL: @intrinsic_vlse_mask_v_nxv1i32_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i64 [[TMP4:%.*]], 0
-; CHECK-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP16:%.*]]
-; CHECK: 7:
-; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP4]], i64 [[TMP8]])
-; CHECK-NEXT: br label [[DOTSPLIT:%.*]]
-; CHECK: .split:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[TMP15:%.*]] ]
-; CHECK-NEXT: [[TMP10:%.*]] = extractelement <vscale x 1 x i1> [[TMP3:%.*]], i64 [[IV]]
-; CHECK-NEXT: br i1 [[TMP10]], label [[TMP11:%.*]], label [[TMP15]]
-; CHECK: 11:
-; CHECK-NEXT: [[TMP12:%.*]] = mul i64 [[IV]], [[TMP2:%.*]]
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], i64 [[TMP12]]
-; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
-; CHECK-NEXT: call void @__asan_loadN(i64 [[TMP14]], i64 4)
-; CHECK-NEXT: br label [[TMP15]]
-; CHECK: 15:
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP9]]
-; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
-; CHECK: .split.split:
-; CHECK-NEXT: br label [[TMP16]]
-; CHECK: 16:
-; CHECK-NEXT: [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1]], i64 [[TMP2]], <vscale x 1 x i1> [[TMP3]], i64 [[TMP4]], i64 1)
-; CHECK-NEXT: ret <vscale x 1 x i32> [[A]]
-;
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-; Test stride store
-declare void @llvm.riscv.vsse.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- i64,
- i64);
-
-define void @intrinsic_vsse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, i64 %2, i64 %3) sanitize_address {
-; CHECK-LABEL: @intrinsic_vsse_v_nxv1i32_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3:%.*]], 0
-; CHECK-NEXT: br i1 [[TMP5]], label [[TMP6:%.*]], label [[TMP15:%.*]]
-; CHECK: 6:
-; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP3]], i64 [[TMP7]])
-; CHECK-NEXT: br label [[DOTSPLIT:%.*]]
-; CHECK: .split:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP6]] ], [ [[IV_NEXT:%.*]], [[TMP14:%.*]] ]
-; CHECK-NEXT: [[TMP9:%.*]] = extractelement <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), i64 [[IV]]
-; CHECK-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP14]]
-; CHECK: 10:
-; CHECK-NEXT: [[TMP11:%.*]] = mul i64 [[IV]], [[TMP2:%.*]]
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], i64 [[TMP11]]
-; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64
-; CHECK-NEXT: call void @__asan_storeN(i64 [[TMP13]], i64 4)
-; CHECK-NEXT: br label [[TMP14]]
-; CHECK: 14:
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP8]]
-; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
-; CHECK: .split.split:
-; CHECK-NEXT: br label [[TMP15]]
-; CHECK: 15:
-; CHECK-NEXT: call void @llvm.riscv.vsse.nxv1i32.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1]], i64 [[TMP2]], i64 [[TMP3]])
-; CHECK-NEXT: ret void
-;
-entry:
- call void @llvm.riscv.vsse.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- i64 %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- i64,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) sanitize_address {
-; CHECK-LABEL: @intrinsic_vsse_mask_v_nxv1i32_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i64 [[TMP4:%.*]], 0
-; CHECK-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP16:%.*]]
-; CHECK: 7:
-; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP4]], i64 [[TMP8]])
-; CHECK-NEXT: br label [[DOTSPLIT:%.*]]
-; CHECK: .split:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[TMP15:%.*]] ]
-; CHECK-NEXT: [[TMP10:%.*]] = extractelement <vscale x 1 x i1> [[TMP3:%.*]], i64 [[IV]]
-; CHECK-NEXT: br i1 [[TMP10]], label [[TMP11:%.*]], label [[TMP15]]
-; CHECK: 11:
-; CHECK-NEXT: [[TMP12:%.*]] = mul i64 [[IV]], [[TMP2:%.*]]
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], i64 [[TMP12]]
-; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
-; CHECK-NEXT: call void @__asan_storeN(i64 [[TMP14]], i64 4)
-; CHECK-NEXT: br label [[TMP15]]
-; CHECK: 15:
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP9]]
-; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
-; CHECK: .split.split:
-; CHECK-NEXT: br label [[TMP16]]
-; CHECK: 16:
-; CHECK-NEXT: call void @llvm.riscv.vsse.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1]], i64 [[TMP2]], <vscale x 1 x i1> [[TMP3]], i64 [[TMP4]])
-; CHECK-NEXT: ret void
-;
-entry:
- call void @llvm.riscv.vsse.mask.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vlsseg2_nxv1i32(ptr %base, i64 %offset, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vlsseg2_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vlsseg2_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vlsseg2_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0
-; CHECK-NEXT: [[TMP3:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], ptr [[BASE]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL]], i64 1)
-; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP4]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
- %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
- ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vlsseg3_nxv1i32(ptr %base, i64 %offset, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vlsseg3_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg3.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vlsseg3_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vlsseg3_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg3.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0
-; CHECK-NEXT: [[TMP3:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg3.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], ptr [[BASE]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL]], i64 1)
-; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP4]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
- %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
- ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vlsseg4_nxv1i32(ptr %base, i64 %offset, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vlsseg4_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg4.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vlsseg4_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vlsseg4_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg4.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0
-; CHECK-NEXT: [[TMP3:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg4.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], ptr [[BASE]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL]], i64 1)
-; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP4]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
- %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
- ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vlsseg5_nxv1i32(ptr %base, i64 %offset, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vlsseg5_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg5.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vlsseg5_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vlsseg5_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg5.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0
-; CHECK-NEXT: [[TMP3:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg5.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], ptr [[BASE]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL]], i64 1)
-; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP4]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
- %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
- ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vlsseg6_nxv1i32(ptr %base, i64 %offset, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vlsseg6_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg6.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vlsseg6_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vlsseg6_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg6.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0
-; CHECK-NEXT: [[TMP3:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg6.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], ptr [[BASE]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL]], i64 1)
-; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP4]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
- %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
- ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vlsseg7_nxv1i32(ptr %base, i64 %offset, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vlsseg7_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg7.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vlsseg7_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vlsseg7_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg7.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0
-; CHECK-NEXT: [[TMP3:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg7.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], ptr [[BASE]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL]], i64 1)
-; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP4]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
- %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
- ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vlsseg8_nxv1i32(ptr %base, i64 %offset, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vlsseg8_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg8.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vlsseg8_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vlsseg8_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg8.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0
-; CHECK-NEXT: [[TMP3:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg8.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP2]], ptr [[BASE]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL]], i64 1)
-; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP4]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
- %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
- ret <vscale x 1 x i32> %3
-}
-
-declare void @llvm.riscv.vssseg2.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, i64)
-declare void @llvm.riscv.vssseg2.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, <vscale x 1 x i1>, i64)
-
-define void @test_vssseg2_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vssseg2_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vssseg2.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vssseg2.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl)
- ret void
-}
-
-define void @test_vssseg2_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vssseg2_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vssseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vssseg2.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}
-
-declare void @llvm.riscv.vssseg3.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, i64)
-declare void @llvm.riscv.vssseg3.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, <vscale x 1 x i1>, i64)
-
-define void @test_vssseg3_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vssseg3_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vssseg3.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vssseg3.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl)
- ret void
-}
-
-define void @test_vssseg3_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vssseg3_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vssseg3.mask.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vssseg3.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}
-
-declare void @llvm.riscv.vssseg4.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, i64)
-declare void @llvm.riscv.vssseg4.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, <vscale x 1 x i1>, i64)
-
-define void @test_vssseg4_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vssseg4_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vssseg4.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vssseg4.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl)
- ret void
-}
-
-define void @test_vssseg4_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vssseg4_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vssseg4.mask.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vssseg4.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}
-
-declare void @llvm.riscv.vssseg5.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, i64)
-declare void @llvm.riscv.vssseg5.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, <vscale x 1 x i1>, i64)
-
-define void @test_vssseg5_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vssseg5_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vssseg5.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vssseg5.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl)
- ret void
-}
-
-define void @test_vssseg5_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vssseg5_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vssseg5.mask.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vssseg5.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}
-
-declare void @llvm.riscv.vssseg6.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, i64)
-declare void @llvm.riscv.vssseg6.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, <vscale x 1 x i1>, i64)
-
-define void @test_vssseg6_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vssseg6_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vssseg6.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vssseg6.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl)
- ret void
-}
-
-define void @test_vssseg6_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vssseg6_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vssseg6.mask.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vssseg6.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}
-
-declare void @llvm.riscv.vssseg7.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, i64)
-declare void @llvm.riscv.vssseg7.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, <vscale x 1 x i1>, i64)
-
-define void @test_vssseg7_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vssseg7_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vssseg7.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vssseg7.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl)
- ret void
-}
-
-define void @test_vssseg7_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vssseg7_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vssseg7.mask.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vssseg7.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}
-
-declare void @llvm.riscv.vssseg8.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, i64)
-declare void @llvm.riscv.vssseg8.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, <vscale x 1 x i1>, i64)
-
-define void @test_vssseg8_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vssseg8_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vssseg8.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vssseg8.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl)
- ret void
-}
-
-define void @test_vssseg8_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vssseg8_mask_nxv1i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vssseg8.mask.nxv1i32.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vssseg8.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}
-
-; Test stride value is a multiple of pointer alignment.
-define <vscale x 1 x i32> @intrinsic_vlse_v_nxv1i32_nxv1i32_align(<vscale x 1 x i32>* align 4 %0, i64 %1, i64 %2) sanitize_address {
-; CHECK-LABEL: @intrinsic_vlse_v_nxv1i32_nxv1i32_align(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i64 [[TMP2:%.*]], 0
-; CHECK-NEXT: br i1 [[TMP4]], label [[TMP5:%.*]], label [[TMP14:%.*]]
-; CHECK: 5:
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP2]], i64 [[TMP6]])
-; CHECK-NEXT: br label [[DOTSPLIT:%.*]]
-; CHECK: .split:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP5]] ], [ [[IV_NEXT:%.*]], [[TMP13:%.*]] ]
-; CHECK-NEXT: [[TMP8:%.*]] = extractelement <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), i64 [[IV]]
-; CHECK-NEXT: br i1 [[TMP8]], label [[TMP9:%.*]], label [[TMP13]]
-; CHECK: 9:
-; CHECK-NEXT: [[TMP10:%.*]] = mul i64 [[IV]], 4
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP0:%.*]], i64 [[TMP10]]
-; CHECK-NEXT: [[TMP12:%.*]] = ptrtoint ptr [[TMP11]] to i64
-; CHECK-NEXT: call void @__asan_load4(i64 [[TMP12]])
-; CHECK-NEXT: br label [[TMP13]]
-; CHECK: 13:
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP7]]
-; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
-; CHECK: .split.split:
-; CHECK-NEXT: br label [[TMP14]]
-; CHECK: 14:
-; CHECK-NEXT: [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[TMP0]], i64 4, i64 [[TMP2]])
-; CHECK-NEXT: ret <vscale x 1 x i32> [[A]]
-;
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32>* %0,
- i64 4,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32>* %0, <vscale x 1 x i16> %1, i64 %2) sanitize_address {
-; CHECK-LABEL: @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, ptr [[TMP0:%.*]], <vscale x 1 x i16> [[TMP1:%.*]], i64 [[TMP2:%.*]])
-; CHECK-NEXT: ret <vscale x 1 x i32> [[A]]
-;
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32>* %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) sanitize_address {
-; CHECK-LABEL: @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1:%.*]], <vscale x 1 x i16> [[TMP2:%.*]], <vscale x 1 x i1> [[TMP3:%.*]], i64 [[TMP4:%.*]], i64 1)
-; CHECK-NEXT: ret <vscale x 1 x i32> [[A]]
-;
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float>* %0, <vscale x 1 x i16> %1, i64 %2) sanitize_address {
-; CHECK-LABEL: @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[A:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16.i64(<vscale x 1 x float> undef, ptr [[TMP0:%.*]], <vscale x 1 x i16> [[TMP1:%.*]], i64 [[TMP2:%.*]])
-; CHECK-NEXT: ret <vscale x 1 x float> [[A]]
-;
-entry:
- %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16(
- <vscale x 1 x float> undef,
- <vscale x 1 x float>* %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32>* %0, <vscale x 1 x i16> %1, i64 %2) sanitize_address {
-; CHECK-LABEL: @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, ptr [[TMP0:%.*]], <vscale x 1 x i16> [[TMP1:%.*]], i64 [[TMP2:%.*]])
-; CHECK-NEXT: ret <vscale x 1 x i32> [[A]]
-;
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32>* %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) sanitize_address {
-; CHECK-LABEL: @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1:%.*]], <vscale x 1 x i16> [[TMP2:%.*]], <vscale x 1 x i1> [[TMP3:%.*]], i64 [[TMP4:%.*]], i64 1)
-; CHECK-NEXT: ret <vscale x 1 x i32> [[A]]
-;
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i16(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i16>,
- i64);
-
-define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, i64 %3) sanitize_address {
-; CHECK-LABEL: @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1:%.*]], <vscale x 1 x i16> [[TMP2:%.*]], i64 [[TMP3:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) sanitize_address {
-; CHECK-LABEL: @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1:%.*]], <vscale x 1 x i16> [[TMP2:%.*]], <vscale x 1 x i1> [[TMP3:%.*]], i64 [[TMP4:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i16(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i16>,
- i64);
-
-define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, i64 %3) sanitize_address {
-; CHECK-LABEL: @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1:%.*]], <vscale x 1 x i16> [[TMP2:%.*]], i64 [[TMP3:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) sanitize_address {
-; CHECK-LABEL: @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1:%.*]], <vscale x 1 x i16> [[TMP2:%.*]], <vscale x 1 x i1> [[TMP3:%.*]], i64 [[TMP4:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vloxseg2_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vloxseg2_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vloxseg2_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vloxseg2_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg3.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vloxseg3_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vloxseg3_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg3.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg3.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vloxseg3_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vloxseg3_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg4.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vloxseg4_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vloxseg4_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg4.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg4.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vloxseg4_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vloxseg4_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg5.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vloxseg5_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vloxseg5_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg5.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg5.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vloxseg5_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vloxseg5_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg6.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vloxseg6_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vloxseg6_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg6.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg6.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vloxseg6_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vloxseg6_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg7.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vloxseg7_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vloxseg7_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg7.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg7.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vloxseg7_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vloxseg7_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg8.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vloxseg8_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vloxseg8_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg8.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg8.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vloxseg8_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vloxseg8_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vluxseg2_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vluxseg2_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vluxseg2_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vluxseg3_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vluxseg3_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg3.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vluxseg3_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vluxseg4_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vluxseg4_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg4.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vluxseg4_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vluxseg5_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vluxseg5_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg5.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vluxseg5_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vluxseg6_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vluxseg6_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg6.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vluxseg6_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vluxseg7_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vluxseg7_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg7.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vluxseg7_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
-
-define <vscale x 1 x i32> @test_vluxseg8_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vluxseg8_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg8.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) sanitize_address {
-; CHECK-LABEL: @test_vluxseg8_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = tail call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP2]]
-;
-entry:
- %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
- %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
- ret <vscale x 1 x i32> %1
-}
-
-declare void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsoxseg2_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsoxseg2_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- ret void
-}
-
-define void @test_vsoxseg2_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsoxseg2_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}
-
-declare void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsoxseg3_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsoxseg3_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- ret void
-}
-
-define void @test_vsoxseg3_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsoxseg3_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}
-
-declare void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsoxseg4_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsoxseg4_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- ret void
-}
-
-define void @test_vsoxseg4_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsoxseg4_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}
-
-declare void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsoxseg5_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsoxseg5_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- ret void
-}
-
-define void @test_vsoxseg5_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsoxseg5_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}
-
-declare void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsoxseg6_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsoxseg6_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- ret void
-}
-
-define void @test_vsoxseg6_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsoxseg6_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}
-
-declare void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsoxseg7_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsoxseg7_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- ret void
-}
-
-define void @test_vsoxseg7_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsoxseg7_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}
-
-declare void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsoxseg8_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsoxseg8_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- ret void
-}
-
-define void @test_vsoxseg8_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsoxseg8_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsuxseg2_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsuxseg2_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsuxseg3_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsuxseg3_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsuxseg4_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsuxseg4_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsuxseg5_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsuxseg5_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsuxseg6_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsuxseg6_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsuxseg7_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsuxseg7_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsuxseg8_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
- ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) sanitize_address {
-; CHECK-LABEL: @test_vsuxseg8_mask_nxv1i32_nxv1i16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VAL:%.*]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], <vscale x 1 x i32> [[VAL]], ptr [[BASE:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-; CHECK-NEXT: ret void
-;
-entry:
- tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
- ret void
-}