[llvm] [RISCV][VLOPT] Skip EMUL if it is unknown before entering EMULAndEEWAreEqual (PR #139670)
via llvm-commits
llvm-commits at lists.llvm.org
Mon May 12 23:21:05 PDT 2025
llvmbot wrote:
@llvm/pr-subscribers-backend-risc-v
Author: Piyou Chen (BeMg)
Changes:
Fixes https://github.com/llvm/llvm-project/issues/139288 by bailing out of the user check when the operand is not used as a scalar operand and either the producer's or the consumer's EMUL is unknown, instead of comparing EMULs that may not be set.
---
Full diff: https://github.com/llvm/llvm-project/pull/139670.diff
2 Files Affected:
- (modified) llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp (+5)
- (added) llvm/test/CodeGen/RISCV/rvv/139288-VLOPT-crash.ll (+22)
``````````diff
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 9ed2ba274bc53..88876201ae21b 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -1383,6 +1383,11 @@ RISCVVLOptimizer::checkUsers(const MachineInstr &MI) const {
// If the operand is used as a scalar operand, then the EEW must be
// compatible. Otherwise, the EMUL *and* EEW must be compatible.
bool IsVectorOpUsedAsScalarOp = isVectorOpUsedAsScalarOp(UserOp);
+
+ if (!IsVectorOpUsedAsScalarOp &&
+ (!ConsumerInfo->EMUL || !ProducerInfo->EMUL))
+ return std::nullopt;
+
if ((IsVectorOpUsedAsScalarOp &&
!OperandInfo::EEWAreEqual(*ConsumerInfo, *ProducerInfo)) ||
(!IsVectorOpUsedAsScalarOp &&
diff --git a/llvm/test/CodeGen/RISCV/rvv/139288-VLOPT-crash.ll b/llvm/test/CodeGen/RISCV/rvv/139288-VLOPT-crash.ll
new file mode 100644
index 0000000000000..1e2f0173505c3
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/139288-VLOPT-crash.ll
@@ -0,0 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -mtriple=riscv64 -mattr=+v < %s | FileCheck %s
+
+define i32 @pps_is_equal(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1) #0 {
+; CHECK-LABEL: pps_is_equal:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, ma
+; CHECK-NEXT: vmclr.m v8
+; CHECK-NEXT: vmv.s.x v16, zero
+; CHECK-NEXT: vmor.mm v0, v8, v8
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vredmax.vs v8, v8, v16, v0.t
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %2 = tail call <vscale x 16 x i1> @llvm.vp.or.nxv16i1(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i1> zeroinitializer, <vscale x 16 x i1> %0, i32 1)
+ %3 = tail call i32 @llvm.vp.reduce.smax.nxv16i32(i32 0, <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i1> %2, i32 1)
+ ret i32 %3
+}
+
+declare <vscale x 16 x i1> @llvm.vp.or.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, i32) #1
+declare i32 @llvm.vp.reduce.smax.nxv16i32(i32, <vscale x 16 x i32>, <vscale x 16 x i1>, i32) #1
``````````
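For readers skimming the patch, here is a minimal standalone sketch of the guard's intent. The `OperandInfo` struct and the `emulAndEEWCompatible` helper below are simplified illustrations, not the actual types or API in RISCVVLOptimizer.cpp: when the operand is not used as a scalar operand, the EMUL-and-EEW comparison needs both EMUL values, so an unknown EMUL on either side makes the check give up on that user by returning `std::nullopt`.

```cpp
#include <optional>
#include <utility>

// Simplified stand-in for the optimizer's per-operand info; the real
// OperandInfo in RISCVVLOptimizer.cpp carries more state than this.
struct OperandInfo {
  std::optional<std::pair<unsigned, bool>> EMUL; // may be unknown
  unsigned Log2EEW;
};

// Sketch of the guarded comparison: EMUL is only compared when both
// sides know it; std::nullopt means "cannot analyze this user".
std::optional<bool> emulAndEEWCompatible(const OperandInfo &Consumer,
                                         const OperandInfo &Producer,
                                         bool UsedAsScalarOp) {
  if (UsedAsScalarOp)
    return Consumer.Log2EEW == Producer.Log2EEW;
  // The new guard: with an unknown EMUL on either side, the full
  // EMUL-and-EEW check cannot be performed, so bail out.
  if (!Consumer.EMUL || !Producer.EMUL)
    return std::nullopt;
  return *Consumer.EMUL == *Producer.EMUL &&
         Consumer.Log2EEW == Producer.Log2EEW;
}
```

In the actual pass, returning `std::nullopt` from `checkUsers` simply means the VL of the producing instruction is not reduced based on this user, which is the conservative answer when EMUL cannot be determined.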
https://github.com/llvm/llvm-project/pull/139670