[llvm] [Asan] Provide TTI hook for memory reference information of target intrinsics. (PR #97070)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 23 15:32:43 PDT 2024


================
@@ -0,0 +1,2209 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -mtriple=riscv64 -mattr=+v -passes=asan \
+; RUN: -asan-instrumentation-with-call-threshold=0 -S | FileCheck %s
+
+declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  i64)
+define <vscale x 1 x i32> @intrinsic_vle_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* align 4 %0, i64 %1) sanitize_address {
+; CHECK-LABEL: @intrinsic_vle_v_nxv1i32_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i64 [[TMP1:%.*]], 0
+; CHECK-NEXT:    br i1 [[TMP2]], label [[TMP3:%.*]], label [[TMP11:%.*]]
+; CHECK:       3:
+; CHECK-NEXT:    [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP5:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 [[TMP4]])
+; CHECK-NEXT:    br label [[DOTSPLIT:%.*]]
+; CHECK:       .split:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP3]] ], [ [[IV_NEXT:%.*]], [[TMP10:%.*]] ]
+; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), i64 [[IV]]
+; CHECK-NEXT:    br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP10]]
+; CHECK:       7:
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr <vscale x 1 x i32>, ptr [[TMP0:%.*]], i64 0, i64 [[IV]]
+; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[TMP8]] to i64
+; CHECK-NEXT:    call void @__asan_load4(i64 [[TMP9]])
+; CHECK-NEXT:    br label [[TMP10]]
+; CHECK:       10:
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP5]]
+; CHECK-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK:       .split.split:
+; CHECK-NEXT:    br label [[TMP11]]
+; CHECK:       11:
+; CHECK-NEXT:    [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[TMP0]], i64 [[TMP1]])
+; CHECK-NEXT:    ret <vscale x 1 x i32> [[A]]
+;
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
+  <vscale x 1 x i32> undef,
+  <vscale x 1 x i32>* %0,
+  i64 %1)
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i1>,
+  i64,
+  i64)
+define <vscale x 1 x i32> @intrinsic_vle_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* align 4 %1, <vscale x 1 x i1> %2, i64 %3) sanitize_address {
+; CHECK-LABEL: @intrinsic_vle_mask_v_nxv1i32_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne i64 [[TMP3:%.*]], 0
+; CHECK-NEXT:    br i1 [[TMP4]], label [[TMP5:%.*]], label [[TMP13:%.*]]
+; CHECK:       5:
+; CHECK-NEXT:    [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP7:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP3]], i64 [[TMP6]])
+; CHECK-NEXT:    br label [[DOTSPLIT:%.*]]
+; CHECK:       .split:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP5]] ], [ [[IV_NEXT:%.*]], [[TMP12:%.*]] ]
+; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <vscale x 1 x i1> [[TMP2:%.*]], i64 [[IV]]
+; CHECK-NEXT:    br i1 [[TMP8]], label [[TMP9:%.*]], label [[TMP12]]
+; CHECK:       9:
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr <vscale x 1 x i32>, ptr [[TMP1:%.*]], i64 0, i64 [[IV]]
+; CHECK-NEXT:    [[TMP11:%.*]] = ptrtoint ptr [[TMP10]] to i64
+; CHECK-NEXT:    call void @__asan_load4(i64 [[TMP11]])
+; CHECK-NEXT:    br label [[TMP12]]
+; CHECK:       12:
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP7]]
+; CHECK-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK:       .split.split:
+; CHECK-NEXT:    br label [[TMP13]]
+; CHECK:       13:
+; CHECK-NEXT:    [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1]], <vscale x 1 x i1> [[TMP2]], i64 [[TMP3]], i64 1)
+; CHECK-NEXT:    ret <vscale x 1 x i32> [[A]]
+;
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
+  <vscale x 1 x i32> %0,
+  <vscale x 1 x i32>* %1,
+  <vscale x 1 x i1> %2,
+  i64 %3, i64 1)
+  ret <vscale x 1 x i32> %a
+}
+
+declare void @llvm.riscv.vse.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  i64)
+define void @intrinsic_vse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* align 4 %1, i64 %2) sanitize_address {
+; CHECK-LABEL: @intrinsic_vse_v_nxv1i32_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne i64 [[TMP2:%.*]], 0
+; CHECK-NEXT:    br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP12:%.*]]
+; CHECK:       4:
+; CHECK-NEXT:    [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP6:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP2]], i64 [[TMP5]])
+; CHECK-NEXT:    br label [[DOTSPLIT:%.*]]
+; CHECK:       .split:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP4]] ], [ [[IV_NEXT:%.*]], [[TMP11:%.*]] ]
+; CHECK-NEXT:    [[TMP7:%.*]] = extractelement <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), i64 [[IV]]
+; CHECK-NEXT:    br i1 [[TMP7]], label [[TMP8:%.*]], label [[TMP11]]
+; CHECK:       8:
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr <vscale x 1 x i32>, ptr [[TMP1:%.*]], i64 0, i64 [[IV]]
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[TMP9]] to i64
+; CHECK-NEXT:    call void @__asan_store4(i64 [[TMP10]])
+; CHECK-NEXT:    br label [[TMP11]]
+; CHECK:       11:
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP6]]
+; CHECK-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK:       .split.split:
+; CHECK-NEXT:    br label [[TMP12]]
+; CHECK:       12:
+; CHECK-NEXT:    call void @llvm.riscv.vse.nxv1i32.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1]], i64 [[TMP2]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  call void @llvm.riscv.vse.nxv1i32(
+  <vscale x 1 x i32> %0,
+  <vscale x 1 x i32>* %1,
+  i64 %2)
+  ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i1>,
+  i64)
+define void @intrinsic_vse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* align 4 %1, <vscale x 1 x i1> %2, i64 %3) sanitize_address {
+; CHECK-LABEL: @intrinsic_vse_mask_v_nxv1i32_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne i64 [[TMP3:%.*]], 0
+; CHECK-NEXT:    br i1 [[TMP4]], label [[TMP5:%.*]], label [[TMP13:%.*]]
+; CHECK:       5:
+; CHECK-NEXT:    [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP7:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP3]], i64 [[TMP6]])
+; CHECK-NEXT:    br label [[DOTSPLIT:%.*]]
+; CHECK:       .split:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP5]] ], [ [[IV_NEXT:%.*]], [[TMP12:%.*]] ]
+; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <vscale x 1 x i1> [[TMP2:%.*]], i64 [[IV]]
+; CHECK-NEXT:    br i1 [[TMP8]], label [[TMP9:%.*]], label [[TMP12]]
+; CHECK:       9:
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr <vscale x 1 x i32>, ptr [[TMP1:%.*]], i64 0, i64 [[IV]]
+; CHECK-NEXT:    [[TMP11:%.*]] = ptrtoint ptr [[TMP10]] to i64
+; CHECK-NEXT:    call void @__asan_store4(i64 [[TMP11]])
+; CHECK-NEXT:    br label [[TMP12]]
+; CHECK:       12:
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP7]]
+; CHECK-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK:       .split.split:
+; CHECK-NEXT:    br label [[TMP13]]
+; CHECK:       13:
+; CHECK-NEXT:    call void @llvm.riscv.vse.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1]], <vscale x 1 x i1> [[TMP2]], i64 [[TMP3]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  call void @llvm.riscv.vse.mask.nxv1i32(
+  <vscale x 1 x i32> %0,
+  <vscale x 1 x i32>* %1,
+  <vscale x 1 x i1> %2,
+  i64 %3)
+  ret void
+}
+
+declare {<vscale x 1 x i32>, <vscale x 1 x i32>} @llvm.riscv.vlseg2.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, ptr, i64)
+declare {<vscale x 1 x i32>, <vscale x 1 x i32>} @llvm.riscv.vlseg2.mask.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i64, i64)
+
+define <vscale x 1 x i32> @test_vlseg2_nxv1i32(ptr align 4 %base, i64 %vl) sanitize_address {
----------------
topperc wrote:

Is this just showing we don't support this intrinsic yet?

https://github.com/llvm/llvm-project/pull/97070
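
For context, the guard loops in the CHECK lines above come from ASan asking
the target which operands of an intrinsic reference memory, then checking
each of the first umin(vl, vscale) lanes. A minimal sketch of what such a
TTI hook could look like follows; the hook name, struct fields, and the
RISC-V wiring are illustrative assumptions, not necessarily the exact API
this PR adds:

  // Hypothetical sketch of a target hook describing intrinsic memory
  // references; the names and fields here are assumptions for illustration.
  #include "llvm/ADT/SmallVector.h"
  #include "llvm/IR/IntrinsicInst.h"
  #include "llvm/IR/IntrinsicsRISCV.h"
  using namespace llvm;

  struct MemoryRefInfo {
    Value *PtrOperand; // pointer the intrinsic loads from or stores to
    Type *OpType;      // vector type of the access
    bool IsWrite;      // load vs. store
    Value *Mask;       // per-lane mask, or nullptr if unmasked
    Value *EVL;        // explicit vector length (vl), or nullptr
  };

  // A target implementation (e.g. RISCVTTIImpl) would map intrinsic IDs
  // to memory-reference descriptions:
  static bool getMemoryRefInfo(SmallVectorImpl<MemoryRefInfo> &Interesting,
                               IntrinsicInst *II) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::riscv_vle:
      // vle(passthru, ptr, vl): operand 1 is the pointer, operand 2 the vl.
      Interesting.push_back({II->getArgOperand(1), II->getType(),
                             /*IsWrite=*/false, /*Mask=*/nullptr,
                             II->getArgOperand(2)});
      return true;
    case Intrinsic::riscv_vse:
      // vse(value, ptr, vl): stores operand 0's type through operand 1.
      Interesting.push_back({II->getArgOperand(1),
                             II->getArgOperand(0)->getType(),
                             /*IsWrite=*/true, /*Mask=*/nullptr,
                             II->getArgOperand(2)});
      return true;
    default:
      return false; // undescribed intrinsics are left uninstrumented
    }
  }

Under this reading, ASan consumes each entry and, for scalable vectors with
an explicit vector length, emits the per-element shadow checks shown in the
autogenerated checks above, while intrinsics the target does not describe
stay uninstrumented — which is presumably what the question about the
vlseg2 test is probing.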

