[llvm] 1d6430b - [RISCV] Update isLegalAddressingMode for RVV.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Tue May 3 19:50:20 PDT 2022
Author: Craig Topper
Date: 2022-05-03T19:49:11-07:00
New Revision: 1d6430b9e2b82ebb9a90632f3b39c892548528d6
URL: https://github.com/llvm/llvm-project/commit/1d6430b9e2b82ebb9a90632f3b39c892548528d6
DIFF: https://github.com/llvm/llvm-project/commit/1d6430b9e2b82ebb9a90632f3b39c892548528d6.diff
LOG: [RISCV] Update isLegalAddressingMode for RVV.
RVV instructions only support base register addressing.
Reviewed By: reames
Differential Revision: https://reviews.llvm.org/D124820
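For context, a minimal standalone sketch (illustration only, not part of this commit) of what the new check accepts, using a simplified stand-in for TargetLowering::AddrMode rather than the real LLVM type: a candidate mode of the form BaseGV + BaseReg + Scale*ScaledReg + BaseOffs is legal for vector types on subtargets with V instructions only when it is a bare base register. The strided-load-store test updates below show the consequence: constant offsets are no longer folded into vector load/store addresses and are instead kept as separate addi instructions next to the access.

// Standalone sketch, not LLVM code: a simplified stand-in for
// TargetLowering::AddrMode and the shape of the check added in this patch.
#include <cassert>
#include <cstdint>

struct AddrMode {
  bool HasBaseGV = false;  // global-value base
  bool HasBaseReg = false; // base register present
  int64_t BaseOffs = 0;    // constant displacement
  int64_t Scale = 0;       // scale applied to an index register
};

// RVV loads/stores encode only a bare base register, so for vector types any
// global base, non-zero scale, or non-zero offset makes the mode illegal.
static bool isLegalRVVAddressingMode(const AddrMode &AM) {
  if (AM.HasBaseGV)
    return false;
  return AM.HasBaseReg && AM.Scale == 0 && AM.BaseOffs == 0;
}

int main() {
  AddrMode Bare;
  Bare.HasBaseReg = true;   // (reg)   -> legal
  AddrMode WithOffset = Bare;
  WithOffset.BaseOffs = 32; // 32(reg) -> rejected; a separate addi is
                            //            emitted instead (see tests below)
  assert(isLegalRVVAddressingMode(Bare));
  assert(!isLegalRVVAddressingMode(WithOffset));
  return 0;
}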
Added:
Modified:
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index ff63b22766d1b..2a999568bac0f 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1014,6 +1014,10 @@ bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
if (AM.BaseGV)
return false;
+ // RVV instructions only support register addressing.
+ if (Subtarget.hasVInstructions() && isa<VectorType>(Ty))
+ return AM.HasBaseReg && AM.Scale == 0 && !AM.BaseOffs;
+
// Require a 12-bit signed offset.
if (!isInt<12>(AM.BaseOffs))
return false;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store.ll
index ca3b584b587d3..4828863adccdb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store.ll
@@ -596,7 +596,6 @@ define void @struct_gather(i32* noalias nocapture %A, %struct.foo* noalias nocap
;
; CHECK-ASM-LABEL: struct_gather:
; CHECK-ASM: # %bb.0: # %entry
-; CHECK-ASM-NEXT: addi a0, a0, 32
; CHECK-ASM-NEXT: addi a1, a1, 132
; CHECK-ASM-NEXT: li a2, 1024
; CHECK-ASM-NEXT: li a3, 16
@@ -606,13 +605,13 @@ define void @struct_gather(i32* noalias nocapture %A, %struct.foo* noalias nocap
; CHECK-ASM-NEXT: vsetivli zero, 8, e32, m1, ta, mu
; CHECK-ASM-NEXT: vlse32.v v8, (a4), a3
; CHECK-ASM-NEXT: vlse32.v v9, (a1), a3
-; CHECK-ASM-NEXT: addi a4, a0, -32
-; CHECK-ASM-NEXT: vle32.v v10, (a4)
-; CHECK-ASM-NEXT: vle32.v v11, (a0)
+; CHECK-ASM-NEXT: vle32.v v10, (a0)
+; CHECK-ASM-NEXT: addi a4, a0, 32
+; CHECK-ASM-NEXT: vle32.v v11, (a4)
; CHECK-ASM-NEXT: vadd.vv v8, v10, v8
; CHECK-ASM-NEXT: vadd.vv v9, v11, v9
-; CHECK-ASM-NEXT: vse32.v v8, (a4)
-; CHECK-ASM-NEXT: vse32.v v9, (a0)
+; CHECK-ASM-NEXT: vse32.v v8, (a0)
+; CHECK-ASM-NEXT: vse32.v v9, (a4)
; CHECK-ASM-NEXT: addi a2, a2, -16
; CHECK-ASM-NEXT: addi a0, a0, 64
; CHECK-ASM-NEXT: addi a1, a1, 256
@@ -838,17 +837,16 @@ define void @gather_of_pointers(i32** noalias nocapture %0, i32** noalias nocapt
;
; CHECK-ASM-LABEL: gather_of_pointers:
; CHECK-ASM: # %bb.0:
-; CHECK-ASM-NEXT: addi a0, a0, 16
; CHECK-ASM-NEXT: li a2, 1024
; CHECK-ASM-NEXT: li a3, 40
; CHECK-ASM-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1
-; CHECK-ASM-NEXT: addi a4, a1, 80
; CHECK-ASM-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-ASM-NEXT: vlse64.v v8, (a1), a3
+; CHECK-ASM-NEXT: addi a4, a1, 80
; CHECK-ASM-NEXT: vlse64.v v9, (a4), a3
-; CHECK-ASM-NEXT: addi a4, a0, -16
-; CHECK-ASM-NEXT: vse64.v v8, (a4)
-; CHECK-ASM-NEXT: vse64.v v9, (a0)
+; CHECK-ASM-NEXT: vse64.v v8, (a0)
+; CHECK-ASM-NEXT: addi a4, a0, 16
+; CHECK-ASM-NEXT: vse64.v v9, (a4)
; CHECK-ASM-NEXT: addi a2, a2, -4
; CHECK-ASM-NEXT: addi a0, a0, 32
; CHECK-ASM-NEXT: addi a1, a1, 160
@@ -912,14 +910,13 @@ define void @scatter_of_pointers(i32** noalias nocapture %0, i32** noalias nocap
;
; CHECK-ASM-LABEL: scatter_of_pointers:
; CHECK-ASM: # %bb.0:
-; CHECK-ASM-NEXT: addi a1, a1, 16
; CHECK-ASM-NEXT: li a2, 1024
; CHECK-ASM-NEXT: li a3, 40
; CHECK-ASM-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1
-; CHECK-ASM-NEXT: addi a4, a1, -16
; CHECK-ASM-NEXT: vsetivli zero, 2, e64, m1, ta, mu
-; CHECK-ASM-NEXT: vle64.v v8, (a4)
-; CHECK-ASM-NEXT: vle64.v v9, (a1)
+; CHECK-ASM-NEXT: vle64.v v8, (a1)
+; CHECK-ASM-NEXT: addi a4, a1, 16
+; CHECK-ASM-NEXT: vle64.v v9, (a4)
; CHECK-ASM-NEXT: addi a4, a0, 80
; CHECK-ASM-NEXT: vsse64.v v8, (a0), a3
; CHECK-ASM-NEXT: vsse64.v v9, (a4), a3