[llvm] [Asan][RISCV] Teach AddressSanitizer to support indexed load/store. (PR #100930)
Yeting Kuo via llvm-commits
llvm-commits at lists.llvm.org
Wed Aug 7 22:58:35 PDT 2024
https://github.com/yetingk updated https://github.com/llvm/llvm-project/pull/100930
From 7925d400f32d86f1409b04edb8ae7fa106cbe0eb Mon Sep 17 00:00:00 2001
From: Yeting Kuo <yeting.kuo at sifive.com>
Date: Thu, 25 Jul 2024 20:54:33 -0700
Subject: [PATCH] [Asan][RISCV] Teach AddressSanitizer to support indexed
load/store instructions.
This is based on #97070.
This patch enables AddressSanitizer to support indexed/segment load/store
instructions. It adds a new member, MaybeOffset, to MemoryRefInfo to describe
the offset between the pointer and the address of this memory reference; the
offset is a byte offset, not an array index.
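For readers unfamiliar with the RVV indexed forms, the following scalar model
(illustrative only; the element and offset widths are just one possible
choice, and the names are made up for the example) shows the addressing this
patch must describe to ASan: each active lane accesses base plus a per-lane
byte offset, with narrow offsets zero-extended first.

  #include <cstddef>
  #include <cstdint>

  // Scalar model of a masked indexed load (e.g. riscv_vluxei with i16
  // offsets and i32 elements). Offsets are byte offsets, not element
  // indices.
  void indexed_load_model(const char *base, const uint16_t *offset,
                          const bool *mask, size_t vl, int32_t *result) {
    for (size_t i = 0; i != vl; ++i)
      if (mask[i]) // inactive lanes access no memory
        result[i] = *reinterpret_cast<const int32_t *>(base + offset[i]);
  }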
---
llvm/include/llvm/Analysis/MemoryRefInfo.h | 8 +-
.../Target/RISCV/RISCVTargetTransformInfo.cpp | 39 +++
.../Instrumentation/AddressSanitizer.cpp | 6 +
.../RISCV/asan-rvv-intrinsics.ll | 234 +++++++++++++++++-
4 files changed, 276 insertions(+), 11 deletions(-)
diff --git a/llvm/include/llvm/Analysis/MemoryRefInfo.h b/llvm/include/llvm/Analysis/MemoryRefInfo.h
index 094141e22aa67b..e26348d1e95f36 100644
--- a/llvm/include/llvm/Analysis/MemoryRefInfo.h
+++ b/llvm/include/llvm/Analysis/MemoryRefInfo.h
@@ -31,14 +31,18 @@ class MemoryRefInfo {
Value *MaybeEVL;
// The Stride Value, if we're looking at a strided load/store.
Value *MaybeStride;
+ // The Offset Value, if we're looking at an indexed load/store. The
+ // offset is a byte offset from the pointer, not an array index.
+ Value *MaybeOffset;
MemoryRefInfo() = default;
MemoryRefInfo(Instruction *I, unsigned OperandNo, bool IsWrite,
class Type *OpType, MaybeAlign Alignment,
Value *MaybeMask = nullptr, Value *MaybeEVL = nullptr,
- Value *MaybeStride = nullptr)
+ Value *MaybeStride = nullptr, Value *MaybeOffset = nullptr)
: IsWrite(IsWrite), OpType(OpType), Alignment(Alignment),
- MaybeMask(MaybeMask), MaybeEVL(MaybeEVL), MaybeStride(MaybeStride) {
+ MaybeMask(MaybeMask), MaybeEVL(MaybeEVL), MaybeStride(MaybeStride),
+ MaybeOffset(MaybeOffset) {
const DataLayout &DL = I->getDataLayout();
TypeStoreSize = DL.getTypeStoreSizeInBits(OpType);
PtrUse = &I->getOperandUse(OperandNo);
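As a usage sketch, a caller describing a masked indexed store could populate
the new field like this (hypothetical names throughout; the real call site is
the TTI hook in the next diff):

  // Sketch only: II's i-th lane writes a ValTy element at Ptr + OffsetOp[i]
  // (byte offsets), guarded by Mask for lanes below EVL. PtrOperandNo, ValTy,
  // Mask, EVL and OffsetOp stand for values the caller computes.
  MemoryRefInfo Info(II, PtrOperandNo, /*IsWrite=*/true, ValTy, Align(1),
                     /*MaybeMask=*/Mask, /*MaybeEVL=*/EVL,
                     /*MaybeStride=*/nullptr, /*MaybeOffset=*/OffsetOp);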
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index fb2b7035c7fc2b..f52c8c72741489 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -13,6 +13,7 @@
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/PatternMatch.h"
@@ -42,6 +43,8 @@ bool RISCVTTIImpl::getMemoryRefInfo(SmallVectorImpl<MemoryRefInfo> &Interesting,
const DataLayout &DL = getDataLayout();
Intrinsic::ID IID = II->getIntrinsicID();
LLVMContext &C = II->getContext();
+ Type *XLenIntTy = IntegerType::get(C, ST->getXLen());
+ IRBuilder<> IB(II);
bool HasMask = false;
switch (IID) {
@@ -110,6 +113,42 @@ bool RISCVTTIImpl::getMemoryRefInfo(SmallVectorImpl<MemoryRefInfo> &Interesting,
EVL, Stride);
return true;
}
+ case Intrinsic::riscv_vloxei_mask:
+ case Intrinsic::riscv_vluxei_mask:
+ case Intrinsic::riscv_vsoxei_mask:
+ case Intrinsic::riscv_vsuxei_mask:
+ HasMask = true;
+ [[fallthrough]];
+ case Intrinsic::riscv_vloxei:
+ case Intrinsic::riscv_vluxei:
+ case Intrinsic::riscv_vsoxei:
+ case Intrinsic::riscv_vsuxei: {
+ // Intrinsic interface (only the ordered versions are listed):
+ // riscv_vloxei(merge, ptr, index, vl)
+ // riscv_vloxei_mask(merge, ptr, index, mask, vl, policy)
+ // riscv_vsoxei(val, ptr, index, vl)
+ // riscv_vsoxei_mask(val, ptr, index, mask, vl, policy)
+ bool IsWrite = II->getType()->isVoidTy();
+ Type *Ty = IsWrite ? II->getArgOperand(0)->getType() : II->getType();
+ const auto *RVVIInfo = RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IID);
+ unsigned VLIndex = RVVIInfo->VLOperand;
+ unsigned PtrOperandNo = VLIndex - 2 - HasMask;
+ // RVV indexed loads/stores zero-extend offset operands which are narrower
+ // than XLEN to XLEN.
+ Value *OffsetOp = II->getArgOperand(PtrOperandNo + 1);
+ Type *OffsetTy = OffsetOp->getType();
+ if (OffsetTy->getScalarType()->getIntegerBitWidth() < ST->getXLen()) {
+ VectorType *OrigType = cast<VectorType>(OffsetTy);
+ Type *ExtendTy = VectorType::get(XLenIntTy, OrigType);
+ OffsetOp = IB.CreateZExt(OffsetOp, ExtendTy);
+ }
+ Value *Mask = ConstantInt::get(OffsetTy->getWithNewBitWidth(1), 1);
+ if (HasMask)
+ Mask = II->getArgOperand(VLIndex - 1);
+ Value *EVL = II->getArgOperand(VLIndex);
+ Interesting.emplace_back(II, PtrOperandNo, IsWrite, Ty, Align(1), Mask, EVL,
+ /* Stride */ nullptr, OffsetOp);
+ return true;
+ }
}
return false;
}
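To see how the operand arithmetic above plays out (assuming the VLOperand
values in RISCVVIntrinsicsTable match the interfaces listed in the comment):

  riscv_vloxei(merge, ptr, index, vl):
    VLIndex = 3, PtrOperandNo = 3 - 2 - 0 = 1 (ptr), index at operand 2.
  riscv_vloxei_mask(merge, ptr, index, mask, vl, policy):
    VLIndex = 4, PtrOperandNo = 4 - 2 - 1 = 1 (ptr), mask at VLIndex - 1 = 3.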
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 5fc4d826e00158..ae634b36f8b780 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -1733,6 +1733,12 @@ void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
else
NumInstrumentedReads++;
+ if (O.MaybeOffset) {
+ Type *Ty = Type::getInt8Ty(*C);
+ IRBuilder<> IB(O.getInsn());
+ Addr = IB.CreateGEP(Ty, Addr, {O.MaybeOffset});
+ }
+
unsigned Granularity = 1 << Mapping.Scale;
if (O.MaybeMask) {
instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.MaybeEVL,
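The effect of the hunk above is that, when MaybeOffset is set, the address
handed to the masked-access instrumentation is a vector of per-lane pointers.
A minimal sketch of that step (mirroring the code above, with Addr and O as
in instrumentMop):

  // With a scalar base pointer and a vector of byte offsets, an i8 GEP
  // yields one pointer per lane; instrumentMaskedLoadOrStore then extracts
  // each lane and emits the __asan_loadN/__asan_storeN calls seen in the
  // tests below.
  IRBuilder<> IB(O.getInsn());
  Value *PerLaneAddr = IB.CreateGEP(IB.getInt8Ty(), Addr, {O.MaybeOffset});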
diff --git a/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll b/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll
index 91e43c57a846df..a32d75537921d9 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll
@@ -1253,7 +1253,31 @@ define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x
; CHECK-LABEL: @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, ptr [[TMP0:%.*]], <vscale x 1 x i16> [[TMP1:%.*]], i64 [[TMP2:%.*]])
+; CHECK-NEXT: [[TMP4:%.*]] = zext <vscale x 1 x i16> [[TMP1:%.*]] to <vscale x 1 x i64>
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP0:%.*]], <vscale x 1 x i64> [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i64 [[TMP2:%.*]], 0
+; CHECK-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP15:%.*]]
+; CHECK: 7:
+; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP2]], i64 [[TMP8]])
+; CHECK-NEXT: br label [[DOTSPLIT:%.*]]
+; CHECK: .split:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[TMP14:%.*]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), i64 [[IV]]
+; CHECK-NEXT: br i1 [[TMP10]], label [[TMP11:%.*]], label [[TMP14]]
+; CHECK: 11:
+; CHECK-NEXT: [[TMP12:%.*]] = extractelement <vscale x 1 x ptr> [[TMP5]], i64 [[IV]]
+; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64
+; CHECK-NEXT: call void @__asan_loadN(i64 [[TMP13]], i64 4)
+; CHECK-NEXT: br label [[TMP14]]
+; CHECK: 14:
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP9]]
+; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK: .split.split:
+; CHECK-NEXT: br label [[TMP15]]
+; CHECK: 15:
+; CHECK-NEXT: [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, ptr [[TMP0]], <vscale x 1 x i16> [[TMP1]], i64 [[TMP2]])
; CHECK-NEXT: ret <vscale x 1 x i32> [[A]]
;
entry:
@@ -1278,7 +1302,31 @@ define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vsca
; CHECK-LABEL: @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1:%.*]], <vscale x 1 x i16> [[TMP2:%.*]], <vscale x 1 x i1> [[TMP3:%.*]], i64 [[TMP4:%.*]], i64 1)
+; CHECK-NEXT: [[TMP6:%.*]] = zext <vscale x 1 x i16> [[TMP2:%.*]] to <vscale x 1 x i64>
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], <vscale x 1 x i64> [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp ne i64 [[TMP4:%.*]], 0
+; CHECK-NEXT: br i1 [[TMP8]], label [[TMP9:%.*]], label [[TMP17:%.*]]
+; CHECK: 9:
+; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP4]], i64 [[TMP10]])
+; CHECK-NEXT: br label [[DOTSPLIT:%.*]]
+; CHECK: .split:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP9]] ], [ [[IV_NEXT:%.*]], [[TMP16:%.*]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = extractelement <vscale x 1 x i1> [[TMP3:%.*]], i64 [[IV]]
+; CHECK-NEXT: br i1 [[TMP12]], label [[TMP13:%.*]], label [[TMP16]]
+; CHECK: 13:
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <vscale x 1 x ptr> [[TMP7]], i64 [[IV]]
+; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[TMP14]] to i64
+; CHECK-NEXT: call void @__asan_loadN(i64 [[TMP15]], i64 4)
+; CHECK-NEXT: br label [[TMP16]]
+; CHECK: 16:
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP11]]
+; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK: .split.split:
+; CHECK-NEXT: br label [[TMP17]]
+; CHECK: 17:
+; CHECK-NEXT: [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1]], <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i1> [[TMP3]], i64 [[TMP4]], i64 1)
; CHECK-NEXT: ret <vscale x 1 x i32> [[A]]
;
entry:
@@ -1302,7 +1350,31 @@ define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale
; CHECK-LABEL: @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[A:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16.i64(<vscale x 1 x float> undef, ptr [[TMP0:%.*]], <vscale x 1 x i16> [[TMP1:%.*]], i64 [[TMP2:%.*]])
+; CHECK-NEXT: [[TMP4:%.*]] = zext <vscale x 1 x i16> [[TMP1:%.*]] to <vscale x 1 x i64>
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP0:%.*]], <vscale x 1 x i64> [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i64 [[TMP2:%.*]], 0
+; CHECK-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP15:%.*]]
+; CHECK: 7:
+; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP2]], i64 [[TMP8]])
+; CHECK-NEXT: br label [[DOTSPLIT:%.*]]
+; CHECK: .split:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[TMP14:%.*]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), i64 [[IV]]
+; CHECK-NEXT: br i1 [[TMP10]], label [[TMP11:%.*]], label [[TMP14]]
+; CHECK: 11:
+; CHECK-NEXT: [[TMP12:%.*]] = extractelement <vscale x 1 x ptr> [[TMP5]], i64 [[IV]]
+; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64
+; CHECK-NEXT: call void @__asan_loadN(i64 [[TMP13]], i64 4)
+; CHECK-NEXT: br label [[TMP14]]
+; CHECK: 14:
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP9]]
+; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK: .split.split:
+; CHECK-NEXT: br label [[TMP15]]
+; CHECK: 15:
+; CHECK-NEXT: [[A:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16.i64(<vscale x 1 x float> undef, ptr [[TMP0]], <vscale x 1 x i16> [[TMP1]], i64 [[TMP2]])
; CHECK-NEXT: ret <vscale x 1 x float> [[A]]
;
entry:
@@ -1325,7 +1397,31 @@ define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x
; CHECK-LABEL: @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, ptr [[TMP0:%.*]], <vscale x 1 x i16> [[TMP1:%.*]], i64 [[TMP2:%.*]])
+; CHECK-NEXT: [[TMP4:%.*]] = zext <vscale x 1 x i16> [[TMP1:%.*]] to <vscale x 1 x i64>
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP0:%.*]], <vscale x 1 x i64> [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i64 [[TMP2:%.*]], 0
+; CHECK-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP15:%.*]]
+; CHECK: 7:
+; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP2]], i64 [[TMP8]])
+; CHECK-NEXT: br label [[DOTSPLIT:%.*]]
+; CHECK: .split:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[TMP14:%.*]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), i64 [[IV]]
+; CHECK-NEXT: br i1 [[TMP10]], label [[TMP11:%.*]], label [[TMP14]]
+; CHECK: 11:
+; CHECK-NEXT: [[TMP12:%.*]] = extractelement <vscale x 1 x ptr> [[TMP5]], i64 [[IV]]
+; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64
+; CHECK-NEXT: call void @__asan_loadN(i64 [[TMP13]], i64 4)
+; CHECK-NEXT: br label [[TMP14]]
+; CHECK: 14:
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP9]]
+; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK: .split.split:
+; CHECK-NEXT: br label [[TMP15]]
+; CHECK: 15:
+; CHECK-NEXT: [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, ptr [[TMP0]], <vscale x 1 x i16> [[TMP1]], i64 [[TMP2]])
; CHECK-NEXT: ret <vscale x 1 x i32> [[A]]
;
entry:
@@ -1350,7 +1446,31 @@ define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vsca
; CHECK-LABEL: @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1:%.*]], <vscale x 1 x i16> [[TMP2:%.*]], <vscale x 1 x i1> [[TMP3:%.*]], i64 [[TMP4:%.*]], i64 1)
+; CHECK-NEXT: [[TMP6:%.*]] = zext <vscale x 1 x i16> [[TMP2:%.*]] to <vscale x 1 x i64>
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], <vscale x 1 x i64> [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp ne i64 [[TMP4:%.*]], 0
+; CHECK-NEXT: br i1 [[TMP8]], label [[TMP9:%.*]], label [[TMP17:%.*]]
+; CHECK: 9:
+; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP4]], i64 [[TMP10]])
+; CHECK-NEXT: br label [[DOTSPLIT:%.*]]
+; CHECK: .split:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP9]] ], [ [[IV_NEXT:%.*]], [[TMP16:%.*]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = extractelement <vscale x 1 x i1> [[TMP3:%.*]], i64 [[IV]]
+; CHECK-NEXT: br i1 [[TMP12]], label [[TMP13:%.*]], label [[TMP16]]
+; CHECK: 13:
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <vscale x 1 x ptr> [[TMP7]], i64 [[IV]]
+; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[TMP14]] to i64
+; CHECK-NEXT: call void @__asan_loadN(i64 [[TMP15]], i64 4)
+; CHECK-NEXT: br label [[TMP16]]
+; CHECK: 16:
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP11]]
+; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK: .split.split:
+; CHECK-NEXT: br label [[TMP17]]
+; CHECK: 17:
+; CHECK-NEXT: [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1]], <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i1> [[TMP3]], i64 [[TMP4]], i64 1)
; CHECK-NEXT: ret <vscale x 1 x i32> [[A]]
;
entry:
@@ -1374,7 +1494,31 @@ define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <
; CHECK-LABEL: @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1:%.*]], <vscale x 1 x i16> [[TMP2:%.*]], i64 [[TMP3:%.*]])
+; CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 1 x i16> [[TMP2:%.*]] to <vscale x 1 x i64>
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], <vscale x 1 x i64> [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = icmp ne i64 [[TMP3:%.*]], 0
+; CHECK-NEXT: br i1 [[TMP7]], label [[TMP8:%.*]], label [[TMP16:%.*]]
+; CHECK: 8:
+; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP3]], i64 [[TMP9]])
+; CHECK-NEXT: br label [[DOTSPLIT:%.*]]
+; CHECK: .split:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP8]] ], [ [[IV_NEXT:%.*]], [[TMP15:%.*]] ]
+; CHECK-NEXT: [[TMP11:%.*]] = extractelement <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), i64 [[IV]]
+; CHECK-NEXT: br i1 [[TMP11]], label [[TMP12:%.*]], label [[TMP15]]
+; CHECK: 12:
+; CHECK-NEXT: [[TMP13:%.*]] = extractelement <vscale x 1 x ptr> [[TMP6]], i64 [[IV]]
+; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; CHECK-NEXT: call void @__asan_storeN(i64 [[TMP14]], i64 4)
+; CHECK-NEXT: br label [[TMP15]]
+; CHECK: 15:
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP10]]
+; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK: .split.split:
+; CHECK-NEXT: br label [[TMP16]]
+; CHECK: 16:
+; CHECK-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1]], <vscale x 1 x i16> [[TMP2]], i64 [[TMP3]])
; CHECK-NEXT: ret void
;
entry:
@@ -1398,7 +1542,31 @@ define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32>
; CHECK-LABEL: @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1:%.*]], <vscale x 1 x i16> [[TMP2:%.*]], <vscale x 1 x i1> [[TMP3:%.*]], i64 [[TMP4:%.*]])
+; CHECK-NEXT: [[TMP6:%.*]] = zext <vscale x 1 x i16> [[TMP2:%.*]] to <vscale x 1 x i64>
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], <vscale x 1 x i64> [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp ne i64 [[TMP4:%.*]], 0
+; CHECK-NEXT: br i1 [[TMP8]], label [[TMP9:%.*]], label [[TMP17:%.*]]
+; CHECK: 9:
+; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP4]], i64 [[TMP10]])
+; CHECK-NEXT: br label [[DOTSPLIT:%.*]]
+; CHECK: .split:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP9]] ], [ [[IV_NEXT:%.*]], [[TMP16:%.*]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = extractelement <vscale x 1 x i1> [[TMP3:%.*]], i64 [[IV]]
+; CHECK-NEXT: br i1 [[TMP12]], label [[TMP13:%.*]], label [[TMP16]]
+; CHECK: 13:
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <vscale x 1 x ptr> [[TMP7]], i64 [[IV]]
+; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[TMP14]] to i64
+; CHECK-NEXT: call void @__asan_storeN(i64 [[TMP15]], i64 4)
+; CHECK-NEXT: br label [[TMP16]]
+; CHECK: 16:
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP11]]
+; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK: .split.split:
+; CHECK-NEXT: br label [[TMP17]]
+; CHECK: 17:
+; CHECK-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1]], <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i1> [[TMP3]], i64 [[TMP4]])
; CHECK-NEXT: ret void
;
entry:
@@ -1422,7 +1590,31 @@ define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <
; CHECK-LABEL: @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1:%.*]], <vscale x 1 x i16> [[TMP2:%.*]], i64 [[TMP3:%.*]])
+; CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 1 x i16> [[TMP2:%.*]] to <vscale x 1 x i64>
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], <vscale x 1 x i64> [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = icmp ne i64 [[TMP3:%.*]], 0
+; CHECK-NEXT: br i1 [[TMP7]], label [[TMP8:%.*]], label [[TMP16:%.*]]
+; CHECK: 8:
+; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP3]], i64 [[TMP9]])
+; CHECK-NEXT: br label [[DOTSPLIT:%.*]]
+; CHECK: .split:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP8]] ], [ [[IV_NEXT:%.*]], [[TMP15:%.*]] ]
+; CHECK-NEXT: [[TMP11:%.*]] = extractelement <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), i64 [[IV]]
+; CHECK-NEXT: br i1 [[TMP11]], label [[TMP12:%.*]], label [[TMP15]]
+; CHECK: 12:
+; CHECK-NEXT: [[TMP13:%.*]] = extractelement <vscale x 1 x ptr> [[TMP6]], i64 [[IV]]
+; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; CHECK-NEXT: call void @__asan_storeN(i64 [[TMP14]], i64 4)
+; CHECK-NEXT: br label [[TMP15]]
+; CHECK: 15:
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP10]]
+; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK: .split.split:
+; CHECK-NEXT: br label [[TMP16]]
+; CHECK: 16:
+; CHECK-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1]], <vscale x 1 x i16> [[TMP2]], i64 [[TMP3]])
; CHECK-NEXT: ret void
;
entry:
@@ -1446,7 +1638,31 @@ define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32>
; CHECK-LABEL: @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8
-; CHECK-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1:%.*]], <vscale x 1 x i16> [[TMP2:%.*]], <vscale x 1 x i1> [[TMP3:%.*]], i64 [[TMP4:%.*]])
+; CHECK-NEXT: [[TMP6:%.*]] = zext <vscale x 1 x i16> [[TMP2:%.*]] to <vscale x 1 x i64>
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], <vscale x 1 x i64> [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp ne i64 [[TMP4:%.*]], 0
+; CHECK-NEXT: br i1 [[TMP8]], label [[TMP9:%.*]], label [[TMP17:%.*]]
+; CHECK: 9:
+; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP4]], i64 [[TMP10]])
+; CHECK-NEXT: br label [[DOTSPLIT:%.*]]
+; CHECK: .split:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP9]] ], [ [[IV_NEXT:%.*]], [[TMP16:%.*]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = extractelement <vscale x 1 x i1> [[TMP3:%.*]], i64 [[IV]]
+; CHECK-NEXT: br i1 [[TMP12]], label [[TMP13:%.*]], label [[TMP16]]
+; CHECK: 13:
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <vscale x 1 x ptr> [[TMP7]], i64 [[IV]]
+; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[TMP14]] to i64
+; CHECK-NEXT: call void @__asan_storeN(i64 [[TMP15]], i64 4)
+; CHECK-NEXT: br label [[TMP16]]
+; CHECK: 16:
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP11]]
+; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK: .split.split:
+; CHECK-NEXT: br label [[TMP17]]
+; CHECK: 17:
+; CHECK-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1]], <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i1> [[TMP3]], i64 [[TMP4]])
; CHECK-NEXT: ret void
;
entry: