[llvm] a0c15ed - [AArch64][SVE] Add the @llvm.aarch64.sve.dup.x intrinsic

Andrzej Warzynski via llvm-commits llvm-commits at lists.llvm.org
Fri Mar 13 05:41:15 PDT 2020


Author: Andrzej Warzynski
Date: 2020-03-13T12:40:22Z
New Revision: a0c15ed46056a5c8bc6f86d6d636b6375354efc6

URL: https://github.com/llvm/llvm-project/commit/a0c15ed46056a5c8bc6f86d6d636b6375354efc6
DIFF: https://github.com/llvm/llvm-project/commit/a0c15ed46056a5c8bc6f86d6d636b6375354efc6.diff

LOG: [AArch64][SVE] Add the @llvm.aarch64.sve.dup.x intrinsic

Summary:
This intrinsic implements the unpredicated duplication of a scalar value
across all lanes of an SVE vector and is mapped (via ISD::SPLAT_VECTOR) to one of:
  * DUP <Zd>.<T>, #<imm>
  * DUP <Zd>.<T>, <R><n|SP>
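
As an illustration, a minimal LLVM IR sketch of using the new intrinsic
(the function name @splat_i32 is made up for this example; the expected
codegen mirrors the dup_i32 test added below):

  define <vscale x 4 x i32> @splat_i32(i32 %x) {
    %r = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %x)
    ret <vscale x 4 x i32> %r
  }
  declare <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32)

  ; With llc -mtriple=aarch64-linux-gnu -mattr=+sve this should produce:
  ;   mov z0.s, w0
  ;   ret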

Reviewed by: sdesmalen

Differential Revision: https://reviews.llvm.org/D75900

Added: 
    llvm/test/CodeGen/AArch64/sve-intrinsics-dup-x.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsAArch64.td
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index 033e63255972..3a205de4e368 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -961,6 +961,10 @@ let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
                  LLVMVectorElementType<0>],
                 [IntrNoMem]>;
 
+  class AdvSIMD_SVE_DUP_Unpred_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty], [LLVMVectorElementType<0>],
+                [IntrNoMem]>;
+
   class AdvSIMD_SVE_DUPQ_Intrinsic
     : Intrinsic<[llvm_anyvector_ty],
                 [LLVMMatchType<0>,
@@ -1287,6 +1291,8 @@ def int_aarch64_sve_prf
 //
 
 def int_aarch64_sve_dup : AdvSIMD_SVE_DUP_Intrinsic;
+def int_aarch64_sve_dup_x : AdvSIMD_SVE_DUP_Unpred_Intrinsic;
+
 
 def int_aarch64_sve_index : AdvSIMD_SVE_Index_Intrinsic;
 

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index e2c56c39c06d..23df49790b5e 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -11296,6 +11296,9 @@ static SDValue performIntrinsicCombine(SDNode *N,
     return LowerSVEIntrinsicIndex(N, DAG);
   case Intrinsic::aarch64_sve_dup:
     return LowerSVEIntrinsicDUP(N, DAG);
+  case Intrinsic::aarch64_sve_dup_x:
+    return DAG.getNode(ISD::SPLAT_VECTOR, SDLoc(N), N->getValueType(0),
+                       N->getOperand(1));
   case Intrinsic::aarch64_sve_ext:
     return LowerSVEIntrinsicEXT(N, DAG);
   case Intrinsic::aarch64_sve_sel:

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-dup-x.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-dup-x.ll
new file mode 100644
index 000000000000..8cc1ca86836b
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-dup-x.ll
@@ -0,0 +1,127 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -verify-machineinstrs -mattr=+sve -asm-verbose=0 < %s | FileCheck %s
+
+;
+; Unpredicated dup instruction (disassembled as its preferred alias, mov):
+;   * register + register,
+;   * register + immediate
+;
+
+define <vscale x 16 x i8> @dup_i8(i8 %b) {
+; CHECK-LABEL: dup_i8:
+; CHECK: mov z0.b, w0
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 16 x i8> @dup_imm_i8() {
+; CHECK-LABEL: dup_imm_i8:
+; CHECK: mov z0.b, #16
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 16)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @dup_i16(i16 %b) {
+; CHECK-LABEL: dup_i16:
+; CHECK: mov z0.h, w0
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 8 x i16> @dup_imm_i16() {
+; CHECK-LABEL: dup_imm_i16:
+; CHECK: mov z0.h, #16
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 16)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @dup_i32(i32 %b) {
+; CHECK-LABEL: dup_i32:
+; CHECK: mov z0.s, w0
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 4 x i32> @dup_imm_i32() {
+; CHECK-LABEL: dup_imm_i32:
+; CHECK: mov z0.s, #16
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 16)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @dup_i64(i64 %b) {
+; CHECK-LABEL: dup_i64:
+; CHECK: mov z0.d, x0
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %b)
+  ret <vscale x 2 x i64> %out
+}
+
+define <vscale x 2 x i64> @dup_imm_i64() {
+; CHECK-LABEL: dup_imm_i64:
+; CHECK: mov z0.d, #16
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 16)
+  ret <vscale x 2 x i64> %out
+}
+
+define <vscale x 8 x half> @dup_f16(half %b) {
+; CHECK-LABEL: dup_f16:
+; CHECK: mov z0.h, h0
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half %b)
+  ret <vscale x 8 x half> %out
+}
+
+define <vscale x 8 x half> @dup_imm_f16() {
+; CHECK-LABEL: dup_imm_f16:
+; CHECK: mov z0.h, #16.00000000
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half 16.)
+  ret <vscale x 8 x half> %out
+}
+
+define <vscale x 4 x float> @dup_f32(float %b) {
+; CHECK-LABEL: dup_f32:
+; CHECK: mov z0.s, s0
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float %b)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 4 x float> @dup_imm_f32() {
+; CHECK-LABEL: dup_imm_f32:
+; CHECK: mov z0.s, #16.00000000
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float 16.)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x double> @dup_f64(double %b) {
+; CHECK-LABEL: dup_f64:
+; CHECK: mov z0.d, d0
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double %b)
+  ret <vscale x 2 x double> %out
+}
+
+define <vscale x 2 x double> @dup_imm_f64() {
+; CHECK-LABEL: dup_imm_f64:
+; CHECK: mov z0.d, #16.00000000
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double 16.)
+  ret <vscale x 2 x double> %out
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8( i8)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64)
+declare <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half)
+declare <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float)
+declare <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double)
