[llvm] [instCombine][bugfix] Fix crash caused by use of cast in instCombineSVECmpNE (PR #102472)

via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 14 20:44:39 PDT 2024


https://github.com/cceerczw updated https://github.com/llvm/llvm-project/pull/102472

From 3771bbf8fd4b8b0a1cfd777e7bcd49e807e442b6 Mon Sep 17 00:00:00 2001
From: chengzhiwei <chengzhiwei6 at huawei.com>
Date: Thu, 8 Aug 2024 15:02:45 +0800
Subject: [PATCH] [instCombine][bugfix] Fix crash caused by use of cast in
 instCombineSVECmpNE.

The function instCombineSVECmpNE recognizes a specific pattern of the
'sve.cmpne' intrinsic, predicts its result at compile time, and replaces
the intrinsic with that result. The pattern is:
1. The sve.cmpne compares all elements of the vector (an all-active
   predicate).
2. The sve.cmpne compares its vector operand against zero.
3. The vector operand of the sve.cmpne is produced by a dupq.lane
   intrinsic whose replicated lane index must be zero.
For check 3, instCombineSVECmpNE used 'cast' on operand 1 (the lane
index) of the dupq.lane without verifying that the cast succeeds, which
crashes when that operand is not a ConstantInt.
---
 .../AArch64/AArch64TargetTransformInfo.cpp    |  3 ++-
 .../AArch64/sve-inst-combine-cmpne.ll         | 23 +++++++++++++++++++
 2 files changed, 25 insertions(+), 1 deletion(-)
 create mode 100644 llvm/test/Transforms/InstCombine/AArch64/sve-inst-combine-cmpne.ll

diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 79c0e45e3aa5b5..3b4bb3dcb1b191 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -1174,7 +1174,8 @@ static std::optional<Instruction *> instCombineSVECmpNE(InstCombiner &IC,
     return std::nullopt;
 
   // Where the dupq is a lane 0 replicate of a vector insert
-  if (!cast<ConstantInt>(DupQLane->getArgOperand(1))->isZero())
+  auto *DupQLaneIdx = dyn_cast<ConstantInt>(DupQLane->getArgOperand(1));
+  if (!DupQLaneIdx || !DupQLaneIdx->isZero())
     return std::nullopt;
 
   auto *VecIns = dyn_cast<IntrinsicInst>(DupQLane->getArgOperand(0));
diff --git a/llvm/test/Transforms/InstCombine/AArch64/sve-inst-combine-cmpne.ll b/llvm/test/Transforms/InstCombine/AArch64/sve-inst-combine-cmpne.ll
new file mode 100644
index 00000000000000..668a8bd3f8063d
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/AArch64/sve-inst-combine-cmpne.ll
@@ -0,0 +1,23 @@
+; RUN: opt -S -mtriple=aarch64-unknown-linux-gnu -O2 < %s | FileCheck %s
+
+define dso_local i64 @func(i64 noundef %0, i64 noundef %1) local_unnamed_addr {
+  %3 = add nsw i64 %1, %0
+  %4 = mul nsw i64 %3, 3
+  ret i64 %4
+}
+
+define <vscale x 16 x i1> @testInstCombineSVECmpNE() {
+  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
+  %2 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.index.nxv16i8(i8 42, i8 1)
+  %3 = tail call i64 @func(i64 noundef 1, i64 noundef 2)
+  %4 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %2 , i64 %3)
+  %5 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.nxv16i8(<vscale x 16 x i1> %1, <vscale x 16 x i8> %4, <vscale x 16 x i8> zeroinitializer)
+  ret <vscale x 16 x i1> %5
+  ; CHECK: %4 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.nxv16i8(<vscale x 16 x i1> %1, <vscale x 16 x i8> %3, <vscale x 16 x i8> zeroinitializer)
+  ; CHECK-NEXT: ret <vscale x 16 x i1> %4
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.index.nxv16i8(i8, i8)
+declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 immarg)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8>, i64)
+declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)


