[llvm-branch-commits] [llvm] [LoongArch] Implement isLegalAddressingMode for lsx/lasx (PR #151917)
via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Mon Aug 4 01:14:19 PDT 2025
https://github.com/zhaoqi5 created https://github.com/llvm/llvm-project/pull/151917
TODO: Only tests for general vector load and store are added for now. Maybe consider adding more cases?
>From e72f55129bd953cfc3b1e077dfc7a626e2ba4412 Mon Sep 17 00:00:00 2001
From: Qi Zhao <zhaoqi01 at loongson.cn>
Date: Mon, 28 Jul 2025 19:36:40 +0800
Subject: [PATCH] [LoongArch] Implement isLegalAddressingMode for lsx/lasx
TODO: Only tests for general vector load and store are added for now.
Maybe consider adding more cases?
---
.../LoongArch/LoongArchISelLowering.cpp | 22 ++++++++++++++++---
.../CodeGen/LoongArch/lasx/loop-reduce.ll | 19 ++++++++--------
.../test/CodeGen/LoongArch/lsx/loop-reduce.ll | 11 +++++-----
3 files changed, 33 insertions(+), 19 deletions(-)
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index a5bf0e57e3053..529a6d7063106 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -8385,7 +8385,14 @@ bool LoongArchTargetLowering::isLegalAddressingMode(const DataLayout &DL,
// 2. reg + 12-bit signed offset
// 3. reg + 14-bit signed offset left-shifted by 2
// 4. reg1 + reg2
- // TODO: Add more checks after support vector extension.
+ //
+ // LoongArch LSX/LASX vector extension has three basic addressing modes:
+ // 1. reg + 12-bit signed offset
+ // 2. reg + {12/11/10/9}-bit or 8-bit signed offset multiplied by element
+ //    width, i.e.
+ // si8, si8<<1, si8<<2, si8<<3
+ // si12, si11<<1, si10<<2, si9<<3
+ // 3. reg1 + reg2
// No global is ever allowed as a base.
if (AM.BaseGV)
@@ -8393,8 +8400,17 @@ bool LoongArchTargetLowering::isLegalAddressingMode(const DataLayout &DL,
// Require a 12-bit signed offset or 14-bit signed offset left-shifted by 2
// with `UAL` feature.
- if (!isInt<12>(AM.BaseOffs) &&
- !(isShiftedInt<14, 2>(AM.BaseOffs) && Subtarget.hasUAL()))
+ if (!isInt<12>(AM.BaseOffs) && !(isShiftedInt<14, 2>(AM.BaseOffs) &&
+ Subtarget.hasUAL() && !isa<VectorType>(Ty)))
+ return false;
+
+ // FIXME: Is it necessary and possible to perform fine-grained processing
+ // according to vector element types?
+ if (Subtarget.hasExtLSX() && isa<VectorType>(Ty) &&
+ !(isInt<8>(AM.BaseOffs) || isShiftedInt<8, 1>(AM.BaseOffs) ||
+ isShiftedInt<8, 2>(AM.BaseOffs) || isShiftedInt<8, 3>(AM.BaseOffs) ||
+ isShiftedInt<11, 1>(AM.BaseOffs) || isShiftedInt<10, 2>(AM.BaseOffs) ||
+ isShiftedInt<9, 3>(AM.BaseOffs)))
return false;
switch (AM.Scale) {
diff --git a/llvm/test/CodeGen/LoongArch/lasx/loop-reduce.ll b/llvm/test/CodeGen/LoongArch/lasx/loop-reduce.ll
index 9739d3012bf3c..6c8bfd2e287e7 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/loop-reduce.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/loop-reduce.ll
@@ -10,26 +10,25 @@
define dso_local void @foo() local_unnamed_addr {
; CHECK-LABEL: foo:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: lu12i.w $a0, -2
-; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI0_0)
-; CHECK-NEXT: xvld $xr0, $a1, %pc_lo12(.LCPI0_0)
-; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI0_1)
-; CHECK-NEXT: xvld $xr1, $a1, %pc_lo12(.LCPI0_1)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI0_0)
+; CHECK-NEXT: xvld $xr0, $a0, %pc_lo12(.LCPI0_0)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI0_1)
+; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI0_1)
+; CHECK-NEXT: ori $a0, $zero, 1024
; CHECK-NEXT: pcalau12i $a1, %pc_hi20(a)
; CHECK-NEXT: addi.d $a1, $a1, %pc_lo12(a)
-; CHECK-NEXT: lu12i.w $a2, 2
; CHECK-NEXT: .p2align 4, , 16
; CHECK-NEXT: .LBB0_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: add.d $a3, $a1, $a0
-; CHECK-NEXT: xvldx $xr2, $a3, $a2
+; CHECK-NEXT: xvld $xr2, $a1, 0
; CHECK-NEXT: xvpermi.d $xr3, $xr2, 78
; CHECK-NEXT: xvori.b $xr4, $xr0, 0
; CHECK-NEXT: xvshuf.d $xr4, $xr0, $xr3
; CHECK-NEXT: xvori.b $xr3, $xr1, 0
; CHECK-NEXT: xvshuf.w $xr3, $xr4, $xr2
-; CHECK-NEXT: addi.d $a0, $a0, 16
-; CHECK-NEXT: xvstx $xr3, $a3, $a2
+; CHECK-NEXT: xvst $xr3, $a1, 0
+; CHECK-NEXT: addi.d $a0, $a0, -2
+; CHECK-NEXT: addi.d $a1, $a1, 16
; CHECK-NEXT: bnez $a0, .LBB0_1
; CHECK-NEXT: # %bb.2: # %for.end
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/LoongArch/lsx/loop-reduce.ll b/llvm/test/CodeGen/LoongArch/lsx/loop-reduce.ll
index fbb4d060c9412..9d3be735efa12 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/loop-reduce.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/loop-reduce.ll
@@ -10,18 +10,17 @@
define dso_local void @foo() local_unnamed_addr {
; CHECK-LABEL: foo:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: lu12i.w $a0, -2
+; CHECK-NEXT: ori $a0, $zero, 1024
; CHECK-NEXT: pcalau12i $a1, %pc_hi20(a)
; CHECK-NEXT: addi.d $a1, $a1, %pc_lo12(a)
-; CHECK-NEXT: lu12i.w $a2, 2
; CHECK-NEXT: .p2align 4, , 16
; CHECK-NEXT: .LBB0_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: add.d $a3, $a1, $a0
-; CHECK-NEXT: vldx $vr0, $a3, $a2
+; CHECK-NEXT: vld $vr0, $a1, 0
; CHECK-NEXT: vshuf4i.w $vr0, $vr0, 9
-; CHECK-NEXT: addi.d $a0, $a0, 16
-; CHECK-NEXT: vstx $vr0, $a3, $a2
+; CHECK-NEXT: vst $vr0, $a1, 0
+; CHECK-NEXT: addi.d $a0, $a0, -2
+; CHECK-NEXT: addi.d $a1, $a1, 16
; CHECK-NEXT: bnez $a0, .LBB0_1
; CHECK-NEXT: # %bb.2: # %for.end
; CHECK-NEXT: ret
More information about the llvm-branch-commits
mailing list