[llvm] 2c57868 - [RISCV] Add vector load/store intrinsics to getTgtMemIntrinsic.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Wed Apr 5 19:28:50 PDT 2023
Author: Craig Topper
Date: 2023-04-05T19:28:05-07:00
New Revision: 2c57868e2e877f73c339796c3374ae660bb77f0d
URL: https://github.com/llvm/llvm-project/commit/2c57868e2e877f73c339796c3374ae660bb77f0d
DIFF: https://github.com/llvm/llvm-project/commit/2c57868e2e877f73c339796c3374ae660bb77f0d.diff
LOG: [RISCV] Add vector load/store intrinsics to getTgtMemIntrinsic.
This constructs a proper memory operand for these intrinsics.
Segment load/store will be added in a separate patch.
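The SetRVVLoadStoreInfo helper that the new switch cases call is not part of
this diff. As a minimal sketch of its shape for the non-segment case (the
lambda name appears in the hunk below, but this body is an assumption
reconstructed from the call sites, not the verbatim in-tree code; DL and I
are the DataLayout and CallInst of the enclosing getTgtMemIntrinsic):

  // Sketch (assumed reconstruction, not verbatim): shared helper that the
  // switch cases below call to fill in the IntrinsicInfo for an RVV
  // load/store intrinsic.
  auto SetRVVLoadStoreInfo = [&](unsigned PtrOp, bool IsStore,
                                 bool IsUnitStrided) {
    Info.opc = IsStore ? ISD::INTRINSIC_VOID : ISD::INTRINSIC_W_CHAIN;
    Info.ptrVal = I.getArgOperand(PtrOp);
    // A store carries the value being stored in operand 0; a load is
    // described by its result type.
    Type *MemTy = IsStore ? I.getArgOperand(0)->getType() : I.getType();
    // Strided and indexed forms touch non-contiguous memory, so only the
    // element type is a safe memVT; unit-strided forms can use the whole
    // vector type.
    if (!IsUnitStrided)
      MemTy = MemTy->getScalarType();
    Info.memVT = getValueType(DL, MemTy);
    // VL is not known statically, so the access size is unknown.
    Info.size = MemoryLocation::UnknownSize;
    Info.flags |=
        IsStore ? MachineMemOperand::MOStore : MachineMemOperand::MOLoad;
    return true;
  };

Giving these intrinsics real memory operands lets the machine scheduler and
alias analysis reason about the accesses, which is likely why the vle/vsetvli
sequences are reordered in the test diffs below.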
Reviewed By: kito-cheng
Differential Revision: https://reviews.llvm.org/D147119
Added:
Modified:
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 89d7eef4bfd0..9f178139f18e 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1225,6 +1225,34 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
return SetRVVLoadStoreInfo(/*PtrOp*/ I.getNumOperands() - 3,
/*IsStore*/ true,
/*IsUnitStrided*/ false);
+ case Intrinsic::riscv_vle:
+ case Intrinsic::riscv_vle_mask:
+ case Intrinsic::riscv_vleff:
+ case Intrinsic::riscv_vleff_mask:
+ return SetRVVLoadStoreInfo(/*PtrOp*/ 1,
+ /*IsStore*/ false,
+ /*IsUnitStrided*/ true);
+ case Intrinsic::riscv_vse:
+ case Intrinsic::riscv_vse_mask:
+ return SetRVVLoadStoreInfo(/*PtrOp*/ 1,
+ /*IsStore*/ true,
+ /*IsUnitStrided*/ true);
+ case Intrinsic::riscv_vlse:
+ case Intrinsic::riscv_vlse_mask:
+ case Intrinsic::riscv_vloxei:
+ case Intrinsic::riscv_vloxei_mask:
+ case Intrinsic::riscv_vluxei:
+ case Intrinsic::riscv_vluxei_mask:
+ return SetRVVLoadStoreInfo(/*PtrOp*/ 1,
+ /*IsStore*/ false,
+ /*IsUnitStrided*/ false);
+ case Intrinsic::riscv_vsse:
+ case Intrinsic::riscv_vsse_mask:
+ case Intrinsic::riscv_vsoxei:
+ case Intrinsic::riscv_vsuxei:
+ return SetRVVLoadStoreInfo(/*PtrOp*/ 1,
+ /*IsStore*/ true,
+ /*IsUnitStrided*/ false);
}
}
diff --git a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
index 993decccfcba..529dafe59479 100644
--- a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
+++ b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
@@ -24,17 +24,18 @@ define void @_Z3foov() {
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_49)
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_49)
; CHECK-NEXT: vsetivli zero, 2, e16, m2, ta, ma
-; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vle16.v v10, (a0)
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_48)
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_48)
-; CHECK-NEXT: vle8.v v10, (a0)
+; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs1r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_46)
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_46)
+; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: vle16.v v10, (a0)
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_45)
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_45)
@@ -51,9 +52,12 @@ define void @_Z3foov() {
; CHECK-NEXT: vs2r.v v14, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: vsetivli zero, 2, e16, m2, ta, ma
+; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_40)
+; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_40)
+; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_44)
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_44)
-; CHECK-NEXT: vsetivli zero, 2, e16, m2, ta, ma
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 1
@@ -65,9 +69,6 @@ define void @_Z3foov() {
; CHECK-NEXT: add a1, a1, a2
; CHECK-NEXT: vl2r.v v16, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vle16.v v16, (a0)
-; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_40)
-; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_40)
-; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: lui a0, 1048572
; CHECK-NEXT: addiw a0, a0, 928
; CHECK-NEXT: vmsbc.vx v0, v8, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
index c0fa9042871c..2668774528bc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -885,15 +885,17 @@ if.end: ; preds = %if.else, %if.then
define <vscale x 2 x i32> @test_ratio_only_vmv_s_x(<vscale x 2 x i32>* %x, <vscale x 2 x i16>* %y, i1 %cond) nounwind {
; CHECK-LABEL: test_ratio_only_vmv_s_x:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: andi a2, a2, 1
; CHECK-NEXT: beqz a2, .LBB20_2
; CHECK-NEXT: # %bb.1: # %if
+; CHECK-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v9, (a1)
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vwcvt.x.x.v v8, v9
-; CHECK-NEXT: .LBB20_2: # %if.end
+; CHECK-NEXT: j .LBB20_3
+; CHECK-NEXT: .LBB20_2:
+; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: .LBB20_3: # %if.end
; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, ma
; CHECK-NEXT: vmv.s.x v8, zero
; CHECK-NEXT: ret
@@ -915,14 +917,15 @@ if.end:
define <vscale x 2 x i32> @test_ratio_only_vmv_s_x2(<vscale x 2 x i32>* %x, <vscale x 2 x i16>* %y, i1 %cond) nounwind {
; CHECK-LABEL: test_ratio_only_vmv_s_x2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
-; CHECK-NEXT: vle16.v v9, (a1)
; CHECK-NEXT: andi a2, a2, 1
; CHECK-NEXT: beqz a2, .LBB21_2
; CHECK-NEXT: # %bb.1: # %if
+; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: j .LBB21_3
; CHECK-NEXT: .LBB21_2:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
+; CHECK-NEXT: vle16.v v9, (a1)
; CHECK-NEXT: vwcvt.x.x.v v8, v9
; CHECK-NEXT: .LBB21_3: # %if.end
; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, ma