[llvm-branch-commits] [llvm] 581772e - [LoongArch] Don't crash on instruction prefetch intrinsics (#135760)
Tom Stellard via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Fri Apr 25 16:26:04 PDT 2025
Author: leecheechen
Date: 2025-04-25T16:25:42-07:00
New Revision: 581772ed077e63d6f6968c0f441a23a9f0dc14c1
URL: https://github.com/llvm/llvm-project/commit/581772ed077e63d6f6968c0f441a23a9f0dc14c1
DIFF: https://github.com/llvm/llvm-project/commit/581772ed077e63d6f6968c0f441a23a9f0dc14c1.diff
LOG: [LoongArch] Don't crash on instruction prefetch intrinsics (#135760)
Instead of failing to select during isel, drop the intrinsic in
lowering.
This is similar to the corresponding X86 fix; see https://reviews.llvm.org/D151050.
Fixes #134624
(cherry picked from commit dfb5b6e27ca3f8b79ebd3346d11b3088c1600b81)
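For illustration (not part of the commit itself), the kind of call that previously crashed isel is an instruction prefetch, i.e. @llvm.prefetch with its fourth argument set to 0; %p below is just a placeholder pointer:

  ; hedged sketch: trailing "i32 0" selects the instruction (non-data) cache,
  ; which LoongArch has no prefetch instruction for
  tail call void @llvm.prefetch(ptr %p, i32 0, i32 3, i32 0)

With this change, lowerPREFETCH returns only the chain operand for such calls, so llc emits no prefetch instruction instead of crashing, as the new prefetchi.ll test checks.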
Added:
llvm/test/CodeGen/LoongArch/prefetchi.ll
Modified:
llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
llvm/lib/Target/LoongArch/LoongArchISelLowering.h
Removed:
################################################################################
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 2282dc8955613..4ed3c3cf92e3e 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -99,7 +99,7 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
- setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
+ setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
// Expand bitreverse.i16 with native-width bitrev and shift for now, before
// we get to know which of sll and revb.2h is faster.
@@ -459,10 +459,24 @@ SDValue LoongArchTargetLowering::LowerOperation(SDValue Op,
return lowerBITREVERSE(Op, DAG);
case ISD::SCALAR_TO_VECTOR:
return lowerSCALAR_TO_VECTOR(Op, DAG);
+ case ISD::PREFETCH:
+ return lowerPREFETCH(Op, DAG);
}
return SDValue();
}
+SDValue LoongArchTargetLowering::lowerPREFETCH(SDValue Op,
+ SelectionDAG &DAG) const {
+ unsigned IsData = Op.getConstantOperandVal(4);
+
+ // We don't support non-data prefetch.
+ // Just preserve the chain.
+ if (!IsData)
+ return Op.getOperand(0);
+
+ return Op;
+}
+
SDValue
LoongArchTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op,
SelectionDAG &DAG) const {
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
index a215ab523874b..3f44a720eca73 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -337,6 +337,7 @@ class LoongArchTargetLowering : public TargetLowering {
SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerBITREVERSE(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
bool isFPImmLegal(const APFloat &Imm, EVT VT,
bool ForCodeSize) const override;
diff --git a/llvm/test/CodeGen/LoongArch/prefetchi.ll b/llvm/test/CodeGen/LoongArch/prefetchi.ll
new file mode 100644
index 0000000000000..a00f6f8161862
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/prefetchi.ll
@@ -0,0 +1,33 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
+
+declare void @llvm.prefetch(ptr, i32, i32, i32) nounwind
+
+define dso_local void @prefetch_no_offset(ptr %ptr) nounwind {
+; LA32-LABEL: prefetch_no_offset:
+; LA32: # %bb.0: # %entry
+; LA32-NEXT: ret
+;
+; LA64-LABEL: prefetch_no_offset:
+; LA64: # %bb.0: # %entry
+; LA64-NEXT: ret
+entry:
+ tail call void @llvm.prefetch(ptr %ptr, i32 0, i32 3, i32 0)
+ ret void
+}
+
+
+define dso_local void @prefetch_with_offset(ptr %ptr) nounwind {
+; LA32-LABEL: prefetch_with_offset:
+; LA32: # %bb.0: # %entry
+; LA32-NEXT: ret
+;
+; LA64-LABEL: prefetch_with_offset:
+; LA64: # %bb.0: # %entry
+; LA64-NEXT: ret
+entry:
+ %addr = getelementptr i8, ptr %ptr, i64 200
+ tail call void @llvm.prefetch(ptr %addr, i32 0, i32 3, i32 0)
+ ret void
+}