[llvm] 37e7a80 - [VE] Add lsv/lvs intrinsic instructions

Kazushi Marukawa via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 16 06:43:00 PST 2020


Author: Kazushi (Jam) Marukawa
Date: 2020-11-16T23:42:51+09:00
New Revision: 37e7a80aed7c92575e20ff44f10408eb0cf47bad

URL: https://github.com/llvm/llvm-project/commit/37e7a80aed7c92575e20ff44f10408eb0cf47bad
DIFF: https://github.com/llvm/llvm-project/commit/37e7a80aed7c92575e20ff44f10408eb0cf47bad.diff

LOG: [VE] Add lsv/lvs intrinsic instructions

Add lsv/lvs intrinsic instructions and a regression test.  lsv writes a
scalar value into a specified element of a vector register; lvs reads a
specified element of a vector register back into a scalar register.

Reviewed By: simoll

Differential Revision: https://reviews.llvm.org/D91526
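
For illustration, a minimal IR sketch of how the new intrinsics compose
(the function name @roundtrip is illustrative; the declarations match the
ones used in the regression test below): lsv inserts an i64 scalar at
element %idx, and lvsl reads that element back.

  declare <256 x double> @llvm.ve.vl.lsv.vvss(<256 x double>, i32, i64)
  declare i64 @llvm.ve.vl.lvsl.svs(<256 x double>, i32)

  ; Insert %val at element %idx of %v, then read the same element back.
  define i64 @roundtrip(<256 x double> %v, i32 %idx, i64 %val) {
    %v2 = call <256 x double> @llvm.ve.vl.lsv.vvss(<256 x double> %v, i32 %idx, i64 %val)
    %r = call i64 @llvm.ve.vl.lvsl.svs(<256 x double> %v2, i32 %idx)
    ret i64 %r
  }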

Added: 
    llvm/test/CodeGen/VE/VELIntrinsics/lsv.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsVEVL.gen.td
    llvm/lib/Target/VE/VEInstrIntrinsicVL.td

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsVEVL.gen.td b/llvm/include/llvm/IR/IntrinsicsVEVL.gen.td
index 56d0226181df..7fbe9e464092 100644
--- a/llvm/include/llvm/IR/IntrinsicsVEVL.gen.td
+++ b/llvm/include/llvm/IR/IntrinsicsVEVL.gen.td
@@ -80,3 +80,7 @@ let TargetPrefix = "ve" in def int_ve_vl_vstl2dncot_vssl : GCCBuiltin<"__builtin
 let TargetPrefix = "ve" in def int_ve_vl_vstl2dncot_vssml : GCCBuiltin<"__builtin_ve_vl_vstl2dncot_vssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
 let TargetPrefix = "ve" in def int_ve_vl_pfchv_ssl : GCCBuiltin<"__builtin_ve_vl_pfchv_ssl">, Intrinsic<[], [LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrInaccessibleMemOrArgMemOnly]>;
 let TargetPrefix = "ve" in def int_ve_vl_pfchvnc_ssl : GCCBuiltin<"__builtin_ve_vl_pfchvnc_ssl">, Intrinsic<[], [LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrInaccessibleMemOrArgMemOnly]>;
+let TargetPrefix = "ve" in def int_ve_vl_lsv_vvss : GCCBuiltin<"__builtin_ve_vl_lsv_vvss">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<i64>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_lvsl_svs : GCCBuiltin<"__builtin_ve_vl_lvsl_svs">, Intrinsic<[LLVMType<i64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_lvsd_svs : GCCBuiltin<"__builtin_ve_vl_lvsd_svs">, Intrinsic<[LLVMType<f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_lvss_svs : GCCBuiltin<"__builtin_ve_vl_lvss_svs">, Intrinsic<[LLVMType<f32>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;

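For reference, the four records above correspond to the following IR-level
declarations (these match the declares in the new regression test):

  declare <256 x double> @llvm.ve.vl.lsv.vvss(<256 x double>, i32, i64)
  declare i64 @llvm.ve.vl.lvsl.svs(<256 x double>, i32)
  declare double @llvm.ve.vl.lvsd.svs(<256 x double>, i32)
  declare float @llvm.ve.vl.lvss.svs(<256 x double>, i32)
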
diff --git a/llvm/lib/Target/VE/VEInstrIntrinsicVL.td b/llvm/lib/Target/VE/VEInstrIntrinsicVL.td
index c8d253ef65ff..29365b327f27 100644
--- a/llvm/lib/Target/VE/VEInstrIntrinsicVL.td
+++ b/llvm/lib/Target/VE/VEInstrIntrinsicVL.td
@@ -2,5 +2,21 @@
 
 // Define intrinsics written by hand
 
+// The lsv and lvs patterns
+def : Pat<(int_ve_vl_lsv_vvss v256f64:$pt, i32:$sy, i64:$sz),
+          (LSVrr_v (INSERT_SUBREG (i64 (IMPLICIT_DEF)), i32:$sy, sub_i32),
+                   i64:$sz, v256f64:$pt)>;
+def : Pat<(int_ve_vl_lvsl_svs v256f64:$vx, i32:$sy),
+          (LVSvr v256f64:$vx,
+                 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), i32:$sy, sub_i32))>;
+def : Pat<(int_ve_vl_lvsd_svs v256f64:$vx, i32:$sy),
+          (LVSvr v256f64:$vx,
+                 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), i32:$sy, sub_i32))>;
+def : Pat<(int_ve_vl_lvss_svs v256f64:$vx, i32:$sy),
+          (EXTRACT_SUBREG (LVSvr v256f64:$vx,
+                                 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), i32:$sy,
+                                                sub_i32)),
+                          sub_f32)>;
+
 // Define intrinsics automatically generated
 include "VEInstrIntrinsicVL.gen.td"

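A note on the patterns above: the element index reaches each intrinsic as
an i32, so every pattern widens it to the i64 operand the LSV/LVS machine
instructions expect by inserting it into the low 32 bits (sub_i32) of an
undef i64 register; the lvss pattern additionally carves its f32 result
out of the returned 64-bit element via sub_f32.  Conceptually, the lvs
intrinsics behave like extractelement on a plain vector; a rough sketch in
generic IR (illustrative only, not how the backend implements it):

  ; Same extract semantics as lvsl, expressed in target-independent IR.
  define i64 @extract_like(<256 x i64> %v, i32 %idx) {
    %e = extractelement <256 x i64> %v, i32 %idx
    ret i64 %e
  }
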
diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/lsv.ll b/llvm/test/CodeGen/VE/VELIntrinsics/lsv.ll
new file mode 100644
index 000000000000..36f84330770d
--- /dev/null
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/lsv.ll
@@ -0,0 +1,86 @@
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+;;; Test lsv/lvs intrinsic instructions
+;;;
+;;; Note:
+;;;   We test LSVrr_v and LVSvr instructions.
+
+; Function Attrs: nounwind
+define void @lsv_vvss(i8* %0, i64 %1, i32 signext %2) {
+; CHECK-LABEL: lsv_vvss:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s3, 256
+; CHECK-NEXT:    lvl %s3
+; CHECK-NEXT:    vld %v0, 8, %s0
+; CHECK-NEXT:    adds.w.sx %s2, %s2, (0)1
+; CHECK-NEXT:    lsv %v0(%s2), %s1
+; CHECK-NEXT:    vst %v0, 8, %s0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.lsv.vvss(<256 x double> %4, i32 %2, i64 %1)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %5, i64 8, i8* %0, i32 256)
+  ret void
+}
+
+; Function Attrs: nounwind readonly
+declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.lsv.vvss(<256 x double>, i32, i64)
+
+; Function Attrs: nounwind writeonly
+declare void @llvm.ve.vl.vst.vssl(<256 x double>, i64, i8*, i32)
+
+; Function Attrs: nounwind readonly
+define i64 @lvsl_vssl_imm(i8* readonly %0, i32 signext %1) {
+; CHECK-LABEL: lvsl_vssl_imm:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vld %v0, 8, %s0
+; CHECK-NEXT:    adds.w.sx %s0, %s1, (0)1
+; CHECK-NEXT:    lvs %s0, %v0(%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
+  %4 = tail call i64 @llvm.ve.vl.lvsl.svs(<256 x double> %3, i32 %1)
+  ret i64 %4
+}
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.ve.vl.lvsl.svs(<256 x double>, i32)
+
+; Function Attrs: nounwind readonly
+define double @lvsd_vssl_imm(i8* readonly %0, i32 signext %1) {
+; CHECK-LABEL: lvsd_vssl_imm:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vld %v0, 8, %s0
+; CHECK-NEXT:    adds.w.sx %s0, %s1, (0)1
+; CHECK-NEXT:    lvs %s0, %v0(%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
+  %4 = tail call fast double @llvm.ve.vl.lvsd.svs(<256 x double> %3, i32 %1)
+  ret double %4
+}
+
+; Function Attrs: nounwind readnone
+declare double @llvm.ve.vl.lvsd.svs(<256 x double>, i32)
+
+; Function Attrs: nounwind readonly
+define float @lvss_vssl_imm(i8* readonly %0, i32 signext %1) {
+; CHECK-LABEL: lvss_vssl_imm:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    vld %v0, 8, %s0
+; CHECK-NEXT:    adds.w.sx %s0, %s1, (0)1
+; CHECK-NEXT:    lvs %s0, %v0(%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
+  %4 = tail call fast float @llvm.ve.vl.lvss.svs(<256 x double> %3, i32 %1)
+  ret float %4
+}
+
+; Function Attrs: nounwind readnone
+declare float @llvm.ve.vl.lvss.svs(<256 x double>, i32)
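
Note that all three lvs tests expect the identical "lvs %s0, %v0(%s0)"
sequence: the 64-bit element read is the same in each case, and only the
interpretation of the result differs (i64, f64, or the f32 extracted via
sub_f32 in the lvss pattern).  The preceding "adds.w.sx ... (0)1"
effectively adds zero and sign-extends the low 32 bits, normalizing the
i32 index before it is used.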