[clang] 88874f0 - [PowerPC]Implement Vector Shift Double Bit Immediate Builtins

Lei Huang via cfe-commits cfe-commits at lists.llvm.org
Wed Jul 1 18:43:39 PDT 2020


Author: Biplob Mishra
Date: 2020-07-01T20:34:53-05:00
New Revision: 88874f07464467f3852dd662d180f7738756649b

URL: https://github.com/llvm/llvm-project/commit/88874f07464467f3852dd662d180f7738756649b
DIFF: https://github.com/llvm/llvm-project/commit/88874f07464467f3852dd662d180f7738756649b.diff

LOG: [PowerPC]Implement Vector Shift Double Bit Immediate Builtins

Implement the Vector Shift Double Bit Immediate builtins in LLVM and Clang
(a short usage sketch follows the revision link below):
  * vec_sldb()
  * vec_srdb()

Differential Revision: https://reviews.llvm.org/D82440
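
A minimal usage sketch of the new interfaces (assuming a Power10 target,
e.g. -mcpu=pwr10, and the vec_sldb/vec_srdb definitions this patch adds to
altivec.h; the function name shift_pair is illustrative only):

  #include <altivec.h>

  // The third argument must be an integer constant expression; the altivec.h
  // wrappers mask it with 0x7 before handing it to the builtin.
  vector unsigned char shift_pair(vector unsigned char a,
                                  vector unsigned char b) {
    vector unsigned char left  = vec_sldb(a, b, 3); // lowers to vsldbi ..., 3
    vector unsigned char right = vec_srdb(a, b, 3); // lowers to vsrdbi ..., 3
    return vec_or(left, right);
  }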

Added: 
    llvm/test/CodeGen/PowerPC/builtins-ppc-p10permute.ll

Modified: 
    clang/include/clang/Basic/BuiltinsPPC.def
    clang/lib/Headers/altivec.h
    clang/lib/Sema/SemaChecking.cpp
    clang/test/CodeGen/builtins-ppc-p10vector.c
    llvm/include/llvm/IR/IntrinsicsPowerPC.td
    llvm/lib/Target/PowerPC/PPCInstrPrefix.td

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/BuiltinsPPC.def b/clang/include/clang/Basic/BuiltinsPPC.def
index fa5b0b9d0920..2a5ae08ea7b2 100644
--- a/clang/include/clang/Basic/BuiltinsPPC.def
+++ b/clang/include/clang/Basic/BuiltinsPPC.def
@@ -316,6 +316,10 @@ BUILTIN(__builtin_altivec_vclrrb, "V16cV16cUi", "")
 BUILTIN(__builtin_altivec_vclzdm, "V2ULLiV2ULLiV2ULLi", "")
 BUILTIN(__builtin_altivec_vctzdm, "V2ULLiV2ULLiV2ULLi", "")
 
+// P10 Vector Shift built-ins.
+BUILTIN(__builtin_altivec_vsldbi, "V16UcV16UcV16UcIi", "")
+BUILTIN(__builtin_altivec_vsrdbi, "V16UcV16UcV16UcIi", "")
+
 // VSX built-ins.
 
 BUILTIN(__builtin_vsx_lxvd2x, "V2divC*", "")

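
The prototype string "V16UcV16UcV16UcIi" decodes, per the Builtins.def type
conventions, to a vector unsigned char result, two vector unsigned char
operands, and a trailing int that must fold to an integer constant
expression (the 'I' modifier). A direct call therefore looks like the sketch
below (the wrapper name raw_builtin is illustrative):

  #include <altivec.h>

  // Both vector operands and the result are vector unsigned char; the last
  // operand (the literal 2 here) must be a compile-time constant in 0..7.
  vector unsigned char raw_builtin(vector unsigned char a,
                                   vector unsigned char b) {
    return __builtin_altivec_vsldbi(a, b, 2);
  }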
diff --git a/clang/lib/Headers/altivec.h b/clang/lib/Headers/altivec.h
index 91279119630d..85f712f18ef3 100644
--- a/clang/lib/Headers/altivec.h
+++ b/clang/lib/Headers/altivec.h
@@ -16881,6 +16881,14 @@ vec_cnttzm(vector unsigned long long __a, vector unsigned long long __b) {
   return __builtin_altivec_vctzdm(__a, __b);
 }
 
+/* vec_sldbi */
+
+#define vec_sldb(__a, __b, __c) __builtin_altivec_vsldbi(__a, __b, (__c & 0x7))
+
+/* vec_srdbi */
+
+#define vec_srdb(__a, __b, __c) __builtin_altivec_vsrdbi(__a, __b, (__c & 0x7))
+
 #endif /* __POWER10_VECTOR__ */
 
 #undef __ATTRS_o_ai

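
Because vec_sldb and vec_srdb are defined as macros rather than overloaded
functions, a single definition serves all of the vector element types
exercised in the tests further down (clang's lax vector conversions account
for the bitcasts seen in the CHECK lines), and the shift count is masked
with 0x7 before it reaches the builtin. The vec_srdb tests rely on that
masking: counts of 8, 9, and 10 are expected to lower to the immediates 0,
1, and 2. A small illustration, assuming the macros above:

  #include <altivec.h>

  // vec_srdb expands to __builtin_altivec_vsrdbi(__a, __b, (__c & 0x7)), so
  // a count of 9 is reduced to 1; both calls should emit the same immediate.
  vector unsigned char same_count(vector unsigned char a,
                                  vector unsigned char b) {
    vector unsigned char x = vec_srdb(a, b, 1);
    vector unsigned char y = vec_srdb(a, b, 9); // 9 & 0x7 == 1
    return vec_xor(x, y);                       // expected all-zero result
  }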
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index b4554c9fd55a..a8d25bd7e240 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -3128,6 +3128,10 @@ bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
      return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7);
   case PPC::BI__builtin_vsx_xxeval:
      return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255);
+  case PPC::BI__builtin_altivec_vsldbi:
+     return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
+  case PPC::BI__builtin_altivec_vsrdbi:
+     return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
   }
   return SemaBuiltinConstantArgRange(TheCall, i, l, u);
 }

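
The new Sema cases require the third argument of a direct builtin call to
be an integer constant expression in the range 0..7; anything else is
rejected at compile time. A sketch of what the check accepts and rejects
(the rejected calls are left commented out so the snippet compiles, and the
exact diagnostic wording is not shown in this patch):

  #include <altivec.h>

  vector unsigned char direct(vector unsigned char a,
                              vector unsigned char b, int n) {
    // return __builtin_altivec_vsldbi(a, b, 8); // rejected: 8 is outside 0..7
    // return __builtin_altivec_vsldbi(a, b, n); // rejected: not a constant
    return __builtin_altivec_vsldbi(a, b, 7);    // accepted
  }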
diff --git a/clang/test/CodeGen/builtins-ppc-p10vector.c b/clang/test/CodeGen/builtins-ppc-p10vector.c
index f7930667af79..efb63ce808cb 100644
--- a/clang/test/CodeGen/builtins-ppc-p10vector.c
+++ b/clang/test/CodeGen/builtins-ppc-p10vector.c
@@ -5,10 +5,13 @@
 
 #include <altivec.h>
 
-vector signed char vsca;
+vector signed char vsca, vscb;
 vector unsigned char vuca, vucb, vucc;
+vector signed short vssa, vssb;
 vector unsigned short vusa, vusb, vusc;
+vector signed int vsia, vsib;
 vector unsigned int vuia, vuib, vuic;
+vector signed long long vslla, vsllb;
 vector unsigned long long vulla, vullb, vullc;
 vector unsigned __int128 vui128a, vui128b, vui128c;
 unsigned int uia;
@@ -146,3 +149,111 @@ vector unsigned long long test_vctzdm(void) {
   // CHECK-NEXT: ret <2 x i64>
   return vec_cnttzm(vulla, vullb);
 }
+
+vector signed char test_vec_sldb_sc(void) {
+  // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 0
+  // CHECK-NEXT: ret <16 x i8>
+  return vec_sldb(vsca, vscb, 0);
+}
+
+vector unsigned char test_vec_sldb_uc(void) {
+  // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 1
+  // CHECK-NEXT: ret <16 x i8>
+  return vec_sldb(vuca, vucb, 1);
+}
+
+vector signed short test_vec_sldb_ss(void) {
+  // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 2
+  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
+  // CHECK-NEXT: ret <8 x i16>
+  return vec_sldb(vssa, vssb, 2);
+}
+
+vector unsigned short test_vec_sldb_us(void) {
+  // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 3
+  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
+  // CHECK-NEXT: ret <8 x i16>
+  return vec_sldb(vusa, vusb, 3);
+}
+
+vector signed int test_vec_sldb_si(void) {
+  // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 4
+  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
+  // CHECK-NEXT: ret <4 x i32>
+  return vec_sldb(vsia, vsib, 4);
+}
+
+vector unsigned int test_vec_sldb_ui(void) {
+  // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 5
+  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
+  // CHECK-NEXT: ret <4 x i32>
+  return vec_sldb(vuia, vuib, 5);
+}
+
+vector signed long long test_vec_sldb_sll(void) {
+  // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 6
+  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
+  // CHECK-NEXT: ret <2 x i64>
+  return vec_sldb(vslla, vsllb, 6);
+}
+
+vector unsigned long long test_vec_sldb_ull(void) {
+  // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 7
+  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
+  // CHECK-NEXT: ret <2 x i64>
+  return vec_sldb(vulla, vullb, 7);
+}
+
+vector signed char test_vec_srdb_sc(void) {
+  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 0
+  // CHECK-NEXT: ret <16 x i8>
+  return vec_srdb(vsca, vscb, 8);
+}
+
+vector unsigned char test_vec_srdb_uc(void) {
+  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 1
+  // CHECK-NEXT: ret <16 x i8>
+  return vec_srdb(vuca, vucb, 9);
+}
+
+vector signed short test_vec_srdb_ss(void) {
+  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 2
+  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
+  // CHECK-NEXT: ret <8 x i16>
+  return vec_srdb(vssa, vssb, 10);
+}
+
+vector unsigned short test_vec_srdb_us(void) {
+  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 3
+  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
+  // CHECK-NEXT: ret <8 x i16>
+  return vec_srdb(vusa, vusb, 3);
+}
+
+vector signed int test_vec_srdb_si(void) {
+  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 4
+  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
+  // CHECK-NEXT: ret <4 x i32>
+  return vec_srdb(vsia, vsib, 4);
+}
+
+vector unsigned int test_vec_srdb_ui(void) {
+  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 5
+  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
+  // CHECK-NEXT: ret <4 x i32>
+  return vec_srdb(vuia, vuib, 5);
+}
+
+vector signed long long test_vec_srdb_sll(void) {
+  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 6
+  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
+  // CHECK-NEXT: ret <2 x i64>
+  return vec_srdb(vslla, vsllb, 6);
+}
+
+vector unsigned long long test_vec_srdb_ull(void) {
+  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 7
+  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
+  // CHECK-NEXT: ret <2 x i64>
+  return vec_srdb(vulla, vullb, 7);
+}

diff --git a/llvm/include/llvm/IR/IntrinsicsPowerPC.td b/llvm/include/llvm/IR/IntrinsicsPowerPC.td
index f7c5b1638cdc..9ad9c6295200 100644
--- a/llvm/include/llvm/IR/IntrinsicsPowerPC.td
+++ b/llvm/include/llvm/IR/IntrinsicsPowerPC.td
@@ -454,6 +454,16 @@ let TargetPrefix = "ppc" in {  // All intrinsics start with "llvm.ppc.".
    def int_ppc_altivec_vclrrb :  GCCBuiltin<"__builtin_altivec_vclrrb">,
                Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty],
                          [IntrNoMem]>;
+
+  // P10 Vector Shift Double Bit Immediate.
+  def int_ppc_altivec_vsldbi : GCCBuiltin<"__builtin_altivec_vsldbi">,
+              Intrinsic<[llvm_v16i8_ty],
+                        [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
+                        [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+  def int_ppc_altivec_vsrdbi : GCCBuiltin<"__builtin_altivec_vsrdbi">,
+              Intrinsic<[llvm_v16i8_ty],
+                        [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
+                        [IntrNoMem, ImmArg<ArgIndex<2>>]>;
 }
 
 // Vector average.

diff --git a/llvm/lib/Target/PowerPC/PPCInstrPrefix.td b/llvm/lib/Target/PowerPC/PPCInstrPrefix.td
index 49217820ea95..d78bd485b6f7 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrPrefix.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrPrefix.td
@@ -738,11 +738,19 @@ let Predicates = [IsISA3_1] in {
   def VSLDBI : VNForm_VTAB5_SD3<22, 0, (outs vrrc:$VRT),
                                 (ins vrrc:$VRA, vrrc:$VRB, u3imm:$SH),
                                 "vsldbi $VRT, $VRA, $VRB, $SH",
-                                IIC_VecGeneral, []>;
+                                IIC_VecGeneral, 
+                                [(set v16i8:$VRT,
+                                      (int_ppc_altivec_vsldbi v16i8:$VRA,
+                                                              v16i8:$VRB,
+                                                              i32:$SH))]>;
   def VSRDBI : VNForm_VTAB5_SD3<22, 1, (outs vrrc:$VRT),
                                 (ins vrrc:$VRA, vrrc:$VRB, u3imm:$SH),
                                 "vsrdbi $VRT, $VRA, $VRB, $SH",
-                                IIC_VecGeneral, []>;
+                                IIC_VecGeneral,
+                                [(set v16i8:$VRT,
+                                      (int_ppc_altivec_vsrdbi v16i8:$VRA,
+                                                              v16i8:$VRB, 
+                                                              i32:$SH))]>;
    def VPDEPD : VXForm_1<1485, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                          "vpdepd $vD, $vA, $vB", IIC_VecGeneral,
                          [(set v2i64:$vD,

diff --git a/llvm/test/CodeGen/PowerPC/builtins-ppc-p10permute.ll b/llvm/test/CodeGen/PowerPC/builtins-ppc-p10permute.ll
new file mode 100644
index 000000000000..ce4fa5b25b5a
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/builtins-ppc-p10permute.ll
@@ -0,0 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -mcpu=pwr10 \
+; RUN:   -ppc-vsr-nums-as-vr -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s
+
+; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -mcpu=pwr10 \
+; RUN:   -ppc-vsr-nums-as-vr -mtriple=powerpc64-unknown-unknown < %s | FileCheck %s
+
+define <16 x i8> @testVSLDBI(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: testVSLDBI:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsldbi v2, v2, v3, 1
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <16 x i8> @llvm.ppc.altivec.vsldbi(<16 x i8> %a, <16 x i8> %b, i32 1)
+  ret <16 x i8> %0
+}
+declare <16 x i8> @llvm.ppc.altivec.vsldbi(<16 x i8>, <16 x i8>, i32 immarg)
+
+define <16 x i8> @testVSRDBI(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: testVSRDBI:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrdbi v2, v2, v3, 1
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <16 x i8> @llvm.ppc.altivec.vsrdbi(<16 x i8> %a, <16 x i8> %b, i32 1)
+  ret <16 x i8> %0
+}
+declare <16 x i8> @llvm.ppc.altivec.vsrdbi(<16 x i8>, <16 x i8>, i32 immarg)
