[llvm] [MemorySanitizer] Use getelementptr instead of ptrtoint+add+inttoptr (PR #161392)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Sep 30 08:42:56 PDT 2025
llvmbot wrote:
@llvm/pr-subscribers-backend-mips
@llvm/pr-subscribers-backend-risc-v
Author: Nikita Popov (nikic)
<details>
<summary>Changes</summary>
MemorySanitizer currently does a lot of pointer arithmetic using ptrtoint+add+inttoptr instead of using getelementptr. As far as I can tell, there is no need to use this pattern -- msan is not trying to synthesize pointers with different provenance here. The pointers in question stay within one object (like the TLS parameter area).
I suspect that this is just a leftover from pre-opaque-pointer times, when this was a natural way to perform offset arithmetic. Nowadays we should just emit a getelementptr i8, aka ptradd.
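For constant offsets from the shadow TLS globals, the new form folds to a getelementptr constant expression, as the test updates below show. For example (value names simplified):

```llvm
; Before:
%s = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; After:
%s = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
```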
Note: I still need to update a couple of tests with manual check lines here, but the current changes should be representative.
---
Patch is 3.78 MiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/161392.diff
86 Files Affected:
- (modified) llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp (+25-31)
- (modified) llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-ld1.ll (+36-36)
- (modified) llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-smaxv.ll (+6-6)
- (modified) llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-sminv.ll (+6-6)
- (modified) llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1.ll (+173-173)
- (modified) llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1_lane.ll (+179-179)
- (modified) llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1_origins.ll (+21-21)
- (modified) llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-umaxv.ll (+9-6)
- (modified) llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-uminv.ll (+9-6)
- (modified) llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vadd.ll (+119-119)
- (modified) llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vaddv.ll (+15-15)
- (modified) llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vcvt.ll (+3-3)
- (modified) llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmax.ll (+69-66)
- (modified) llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmovn.ll (+12-12)
- (modified) llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmul.ll (+193-193)
- (modified) llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vshift.ll (+148-148)
- (modified) llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst_float.ll (+217-217)
- (modified) llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll (+119-239)
- (modified) llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll (+208-208)
- (modified) llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll (+208-208)
- (modified) llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll (+207-207)
- (modified) llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll (+7-7)
- (modified) llvm/test/Instrumentation/MemorySanitizer/PowerPC32/kernel-ppcle.ll (+100-160)
- (modified) llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll (+220-220)
- (modified) llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll (+220-220)
- (modified) llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll (+208-208)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/avx-intrinsics-x86.ll (+54-54)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2_512ni-intrinsics.ll (+60-60)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2ni-intrinsics.ll (+94-94)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/avx2-intrinsics-x86.ll (+118-118)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/avx512-gfni-intrinsics.ll (+36-36)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/avx512-intrinsics-upgrade.ll (+1114-1114)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/avx512-intrinsics.ll (+729-729)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/avx512bw-intrinsics-upgrade.ll (+397-397)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/avx512bw-intrinsics.ll (+185-185)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-arith-intrinsics.ll (+119-116)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-arith-vl-intrinsics.ll (+112-109)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-intrinsics.ll (+132-132)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl-intrinsics.ll (+715-715)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl_vnni-intrinsics-upgrade.ll (+48-48)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl_vnni-intrinsics.ll (+48-48)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/avx512vnni-intrinsics-upgrade.ll (+24-24)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/avx512vnni-intrinsics.ll (+24-24)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/avx_vnni-intrinsics.ll (+16-16)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/avxvnniint16-intrinsics.ll (+24-24)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/avxvnniint8-intrinsics.ll (+36-36)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/f16c-intrinsics.ll (+7-4)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/mmx-intrinsics.ll (+74-74)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/sse-intrinsics-x86.ll (+28-28)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/sse2-intrinsics-x86.ll (+39-39)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/sse41-intrinsics-x86.ll (+18-18)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/vararg-too-large.ll (+3-3)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll (+9-9)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll (+94-142)
- (modified) llvm/test/Instrumentation/MemorySanitizer/X86/x86-vpermi2.ll (+38-38)
- (modified) llvm/test/Instrumentation/MemorySanitizer/array_types.ll (+6-6)
- (modified) llvm/test/Instrumentation/MemorySanitizer/bmi.ll (+8-8)
- (modified) llvm/test/Instrumentation/MemorySanitizer/byval-alignment.ll (+1-1)
- (modified) llvm/test/Instrumentation/MemorySanitizer/byval.ll (+12-12)
- (modified) llvm/test/Instrumentation/MemorySanitizer/expand-experimental-reductions.ll (+6-6)
- (modified) llvm/test/Instrumentation/MemorySanitizer/funnel_shift.ll (+72-72)
- (modified) llvm/test/Instrumentation/MemorySanitizer/i386/avx-intrinsics-i386.ll (+54-54)
- (modified) llvm/test/Instrumentation/MemorySanitizer/i386/avx2-intrinsics-i386.ll (+118-118)
- (modified) llvm/test/Instrumentation/MemorySanitizer/i386/mmx-intrinsics.ll (+74-74)
- (modified) llvm/test/Instrumentation/MemorySanitizer/i386/msan_i386intrinsics.ll (+6-6)
- (modified) llvm/test/Instrumentation/MemorySanitizer/i386/sse-intrinsics-i386.ll (+18-18)
- (modified) llvm/test/Instrumentation/MemorySanitizer/i386/sse2-intrinsics-i386.ll (+39-39)
- (modified) llvm/test/Instrumentation/MemorySanitizer/i386/sse41-intrinsics-i386.ll (+18-18)
- (modified) llvm/test/Instrumentation/MemorySanitizer/i386/vararg-too-large.ll (+198-198)
- (modified) llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll (+30-30)
- (modified) llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll (+59-59)
- (modified) llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll (+24-24)
- (modified) llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll (+235-247)
- (modified) llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll (+303-307)
- (modified) llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll (+7-7)
- (modified) llvm/test/Instrumentation/MemorySanitizer/opaque-ptr.ll (+2-2)
- (modified) llvm/test/Instrumentation/MemorySanitizer/or.ll (+3-3)
- (modified) llvm/test/Instrumentation/MemorySanitizer/overflow.ll (+7-7)
- (modified) llvm/test/Instrumentation/MemorySanitizer/pr32842.ll (+1-1)
- (modified) llvm/test/Instrumentation/MemorySanitizer/saturating.ll (+7-7)
- (modified) llvm/test/Instrumentation/MemorySanitizer/scmp.ll (+22-22)
- (modified) llvm/test/Instrumentation/MemorySanitizer/ucmp.ll (+19-19)
- (modified) llvm/test/Instrumentation/MemorySanitizer/vector-reduce-fadd.ll (+8-8)
- (modified) llvm/test/Instrumentation/MemorySanitizer/vector-reduce-fmul.ll (+8-8)
- (modified) llvm/test/Instrumentation/MemorySanitizer/vector_arith.ll (+4-4)
- (modified) llvm/test/Instrumentation/MemorySanitizer/vscale.ll (+3-3)
``````````diff
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index cf076b9ad70ee..0402236de8916 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -1923,20 +1923,20 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
///
/// Shadow = ParamTLS+ArgOffset.
Value *getShadowPtrForArgument(IRBuilder<> &IRB, int ArgOffset) {
- Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
- if (ArgOffset)
- Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
- return IRB.CreateIntToPtr(Base, IRB.getPtrTy(0), "_msarg");
+ if (ArgOffset == 0)
+ return MS.ParamTLS;
+ return IRB.CreatePtrAdd(MS.ParamTLS,
+ ConstantInt::get(MS.IntptrTy, ArgOffset));
}
/// Compute the origin address for a given function argument.
Value *getOriginPtrForArgument(IRBuilder<> &IRB, int ArgOffset) {
if (!MS.TrackOrigins)
return nullptr;
- Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
- if (ArgOffset)
- Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
- return IRB.CreateIntToPtr(Base, IRB.getPtrTy(0), "_msarg_o");
+ if (ArgOffset == 0)
+ return MS.ParamOriginTLS;
+ return IRB.CreatePtrAdd(MS.ParamOriginTLS,
+ ConstantInt::get(MS.IntptrTy, ArgOffset));
}
/// Compute the shadow address for a retval.
@@ -7219,9 +7219,10 @@ struct VarArgHelperBase : public VarArgHelper {
/// Compute the shadow address for a given va_arg.
Value *getShadowPtrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
- Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
- Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
- return IRB.CreateIntToPtr(Base, MS.PtrTy, "_msarg_va_s");
+ if (ArgOffset == 0)
+ return MS.VAArgTLS;
+ return IRB.CreatePtrAdd(MS.VAArgTLS,
+ ConstantInt::get(MS.IntptrTy, ArgOffset));
}
/// Compute the shadow address for a given va_arg.
@@ -7235,12 +7236,13 @@ struct VarArgHelperBase : public VarArgHelper {
/// Compute the origin address for a given va_arg.
Value *getOriginPtrForVAArgument(IRBuilder<> &IRB, int ArgOffset) {
- Value *Base = IRB.CreatePointerCast(MS.VAArgOriginTLS, MS.IntptrTy);
+ if (ArgOffset == 0)
+ return MS.VAArgOriginTLS;
// getOriginPtrForVAArgument() is always called after
// getShadowPtrForVAArgument(), so __msan_va_arg_origin_tls can never
// overflow.
- Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
- return IRB.CreateIntToPtr(Base, MS.PtrTy, "_msarg_va_o");
+ return IRB.CreatePtrAdd(MS.VAArgOriginTLS,
+ ConstantInt::get(MS.IntptrTy, ArgOffset));
}
void CleanUnusedTLS(IRBuilder<> &IRB, Value *ShadowBase,
@@ -7467,10 +7469,8 @@ struct VarArgAMD64Helper : public VarArgHelperBase {
NextNodeIRBuilder IRB(OrigInst);
Value *VAListTag = OrigInst->getArgOperand(0);
- Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
- IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
- ConstantInt::get(MS.IntptrTy, 16)),
- MS.PtrTy);
+ Value *RegSaveAreaPtrPtr =
+ IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, 16));
Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
const Align Alignment = Align(16);
@@ -7482,10 +7482,8 @@ struct VarArgAMD64Helper : public VarArgHelperBase {
if (MS.TrackOrigins)
IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
Alignment, AMD64FpEndOffset);
- Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
- IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
- ConstantInt::get(MS.IntptrTy, 8)),
- MS.PtrTy);
+ Value *OverflowArgAreaPtrPtr =
+ IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, 8));
Value *OverflowArgAreaPtr =
IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
@@ -7615,19 +7613,15 @@ struct VarArgAArch64Helper : public VarArgHelperBase {
// Retrieve a va_list field of 'void*' size.
Value *getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
- Value *SaveAreaPtrPtr = IRB.CreateIntToPtr(
- IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
- ConstantInt::get(MS.IntptrTy, offset)),
- MS.PtrTy);
+ Value *SaveAreaPtrPtr =
+ IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, offset));
return IRB.CreateLoad(Type::getInt64Ty(*MS.C), SaveAreaPtrPtr);
}
// Retrieve a va_list field of 'int' size.
Value *getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
- Value *SaveAreaPtr = IRB.CreateIntToPtr(
- IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
- ConstantInt::get(MS.IntptrTy, offset)),
- MS.PtrTy);
+ Value *SaveAreaPtr =
+ IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, offset));
Value *SaveArea32 = IRB.CreateLoad(IRB.getInt32Ty(), SaveAreaPtr);
return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
}
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-ld1.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-ld1.ll
index 99e9ab939847c..864f6a973334e 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-ld1.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-ld1.ll
@@ -877,7 +877,7 @@ define %struct.__neon_int8x16x2_t @ld2lane_16b(<16 x i8> %L1, <16 x i8> %L2, ptr
; CHECK-LABEL: define %struct.__neon_int8x16x2_t @ld2lane_16b(
; CHECK-SAME: <16 x i8> [[L1:%.*]], <16 x i8> [[L2:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
@@ -904,8 +904,8 @@ define %struct.__neon_int8x16x3_t @ld3lane_16b(<16 x i8> %L1, <16 x i8> %L2, <16
; CHECK-LABEL: define %struct.__neon_int8x16x3_t @ld3lane_16b(
; CHECK-SAME: <16 x i8> [[L1:%.*]], <16 x i8> [[L2:%.*]], <16 x i8> [[L3:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -936,9 +936,9 @@ define %struct.__neon_int8x16x4_t @ld4lane_16b(<16 x i8> %L1, <16 x i8> %L2, <16
; CHECK-LABEL: define %struct.__neon_int8x16x4_t @ld4lane_16b(
; CHECK-SAME: <16 x i8> [[L1:%.*]], <16 x i8> [[L2:%.*]], <16 x i8> [[L3:%.*]], <16 x i8> [[L4:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
@@ -977,7 +977,7 @@ define %struct.__neon_int16x8x2_t @ld2lane_8h(<8 x i16> %L1, <8 x i16> %L2, ptr
; CHECK-LABEL: define %struct.__neon_int16x8x2_t @ld2lane_8h(
; CHECK-SAME: <8 x i16> [[L1:%.*]], <8 x i16> [[L2:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
@@ -1004,8 +1004,8 @@ define %struct.__neon_int16x8x3_t @ld3lane_8h(<8 x i16> %L1, <8 x i16> %L2, <8 x
; CHECK-LABEL: define %struct.__neon_int16x8x3_t @ld3lane_8h(
; CHECK-SAME: <8 x i16> [[L1:%.*]], <8 x i16> [[L2:%.*]], <8 x i16> [[L3:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -1036,9 +1036,9 @@ define %struct.__neon_int16x8x4_t @ld4lane_8h(<8 x i16> %L1, <8 x i16> %L2, <8 x
; CHECK-LABEL: define %struct.__neon_int16x8x4_t @ld4lane_8h(
; CHECK-SAME: <8 x i16> [[L1:%.*]], <8 x i16> [[L2:%.*]], <8 x i16> [[L3:%.*]], <8 x i16> [[L4:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
@@ -1077,7 +1077,7 @@ define %struct.__neon_int32x4x2_t @ld2lane_4s(<4 x i32> %L1, <4 x i32> %L2, ptr
; CHECK-LABEL: define %struct.__neon_int32x4x2_t @ld2lane_4s(
; CHECK-SAME: <4 x i32> [[L1:%.*]], <4 x i32> [[L2:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
@@ -1104,8 +1104,8 @@ define %struct.__neon_int32x4x3_t @ld3lane_4s(<4 x i32> %L1, <4 x i32> %L2, <4 x
; CHECK-LABEL: define %struct.__neon_int32x4x3_t @ld3lane_4s(
; CHECK-SAME: <4 x i32> [[L1:%.*]], <4 x i32> [[L2:%.*]], <4 x i32> [[L3:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -1136,9 +1136,9 @@ define %struct.__neon_int32x4x4_t @ld4lane_4s(<4 x i32> %L1, <4 x i32> %L2, <4 x
; CHECK-LABEL: define %struct.__neon_int32x4x4_t @ld4lane_4s(
; CHECK-SAME: <4 x i32> [[L1:%.*]], <4 x i32> [[L2:%.*]], <4 x i32> [[L3:%.*]], <4 x i32> [[L4:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
@@ -1177,7 +1177,7 @@ define %struct.__neon_int64x2x2_t @ld2lane_2d(<2 x i64> %L1, <2 x i64> %L2, ptr
; CHECK-LABEL: define %struct.__neon_int64x2x2_t @ld2lane_2d(
; CHECK-SAME: <2 x i64> [[L1:%.*]], <2 x i64> [[L2:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
@@ -1204,8 +1204,8 @@ define %struct.__neon_int64x2x3_t @ld3lane_2d(<2 x i64> %L1, <2 x i64> %L2, <2 x
; CHECK-LABEL: define %struct.__neon_int64x2x3_t @ld3lane_2d(
; CHECK-SAME: <2 x i64> [[L1:%.*]], <2 x i64> [[L2:%.*]], <2 x i64> [[L3:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
@@ -1236,9 +1236,9 @@ define %struct.__neon_int64x2x4_t @ld4lane_2d(<2 x i64> %L1, <2 x i64> %L2, <2 x
; CHECK-LABEL: define %struct.__neon_int64x2x4_t @ld4lane_2d(
; CHECK-SAME: <2 x i64> [[L1:%.*]], <2 x i64> [[L2:%.*]], <2 x i64> [[L3:%.*]], <2 x i64> [[L4:%.*]], ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
@@ -2304,7 +2304,7 @@ define <16 x i8> @ld1_16b(<16 x i8> %V, ptr %bar) #0 {
; Make sure we are using the operands defined by the ABI
; CHECK-LABEL: define <16 x i8> @ld1_16b(
; CHECK-SAME: <16 x i8> [[V:%.*]], ptr [[BAR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -2332,7 +2332,7 @@ define <8 x i16> @ld1_8h(<8 x i16> %V, ptr %bar) #0 {
; Make sure we are using the operands defined by the ABI
; CHECK-LABEL: define <8 x i16> @ld1_8h(
; CHECK-SAME: <8 x i16> [[V:%.*]], ptr [[BAR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -2360,7 +2360,7 @@ define <4 x i32> @ld1_4s(<4 x i32> %V, ptr %bar) #0 {
; Make sure we are using the operands defined by the ABI
; CHECK-LABEL: define <4 x i32> @ld1_4s(
; CHECK-SAME: <4 x i32> [[V:%.*]], ptr [[BAR:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls...
[truncated]
``````````
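The same rewrite applies when the base is a runtime pointer rather than a global; there, `IRBuilder::CreatePtrAdd` emits a `getelementptr i8` instruction instead of a constant expression. A minimal sketch of the va_list case from the hunks above (value names are illustrative):

```llvm
; Before: the va_list field address was computed through integer arithmetic.
  %0 = ptrtoint ptr %va_list_tag to i64
  %1 = add i64 %0, 16
  %reg_save_area_pp = inttoptr i64 %1 to ptr

; After: a plain byte offset on the pointer, i.e. ptradd.
  %reg_save_area_pp = getelementptr i8, ptr %va_list_tag, i64 16
  %reg_save_area = load ptr, ptr %reg_save_area_pp
```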
</details>
https://github.com/llvm/llvm-project/pull/161392
More information about the llvm-commits mailing list