[clang] e0c02dc - [PowerPC][Power10] Implement centrifuge, vector gather every nth bit, vector evaluate Builtins in LLVM/Clang

Amy Kwan via cfe-commits cfe-commits at lists.llvm.org
Thu Jun 25 19:34:58 PDT 2020


Author: Amy Kwan
Date: 2020-06-25T21:34:41-05:00
New Revision: e0c02dc9800ebd317d1369848f4e74c8f783533a

URL: https://github.com/llvm/llvm-project/commit/e0c02dc9800ebd317d1369848f4e74c8f783533a
DIFF: https://github.com/llvm/llvm-project/commit/e0c02dc9800ebd317d1369848f4e74c8f783533a.diff

LOG: [PowerPC][Power10] Implement centrifuge, vector gather every nth bit, vector evaluate Builtins in LLVM/Clang

This patch implements builtins for the following prototypes:

unsigned long long __builtin_cfuged (unsigned long long, unsigned long long);
vector unsigned long long vec_cfuge (vector unsigned long long, vector unsigned long long);
unsigned long long vec_gnb (vector unsigned __int128, const unsigned int);
vector unsigned char vec_ternarylogic (vector unsigned char, vector unsigned char, vector unsigned char, const unsigned int);
vector unsigned short vec_ternarylogic (vector unsigned short, vector unsigned short, vector unsigned short, const unsigned int);
vector unsigned int vec_ternarylogic (vector unsigned int, vector unsigned int, vector unsigned int, const unsigned int);
vector unsigned long long vec_ternarylogic (vector unsigned long long, vector unsigned long long, vector unsigned long long, const unsigned int);
vector unsigned __int128 vec_ternarylogic (vector unsigned __int128, vector unsigned __int128, vector unsigned __int128, const unsigned int);
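
A minimal usage sketch, for illustration only (it assumes a Power10 target such as -mcpu=pwr10 and that <altivec.h> is included; the variable names and the particular immediate values are hypothetical):

  #include <altivec.h>

  vector unsigned long long va, vb, vc;
  vector unsigned __int128 v128;
  unsigned long long sa, sb;

  void demo(void) {
    // Scalar centrifuge doubleword.
    unsigned long long r0 = __builtin_cfuged(sa, sb);
    // Vector centrifuge, applied to each doubleword element.
    vector unsigned long long r1 = vec_cfuge(va, vb);
    // Gather every nth bit (here n = 4); the constant must be in [2, 7].
    unsigned long long r2 = vec_gnb(v128, 4);
    // Vector evaluate (ternary logic); the 8-bit immediate selects the
    // logical operation and must be a constant in [0, 255].
    vector unsigned long long r3 = vec_ternarylogic(va, vb, vc, 0x96);
    (void)r0; (void)r1; (void)r2; (void)r3;
  }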

Differential Revision: https://reviews.llvm.org/D80970

Added: 
    

Modified: 
    clang/include/clang/Basic/BuiltinsPPC.def
    clang/lib/Headers/altivec.h
    clang/lib/Sema/SemaChecking.cpp
    clang/test/CodeGen/builtins-ppc-p10.c
    clang/test/CodeGen/builtins-ppc-p10vector.c
    llvm/include/llvm/IR/IntrinsicsPowerPC.td
    llvm/lib/Target/PowerPC/PPCInstrPrefix.td
    llvm/test/CodeGen/PowerPC/p10-bit-manip-ops.ll
    llvm/test/MC/Disassembler/PowerPC/p10insts.txt
    llvm/test/MC/PowerPC/p10.s

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/BuiltinsPPC.def b/clang/include/clang/Basic/BuiltinsPPC.def
index 5bc41c9d6cea..fa5b0b9d0920 100644
--- a/clang/include/clang/Basic/BuiltinsPPC.def
+++ b/clang/include/clang/Basic/BuiltinsPPC.def
@@ -302,6 +302,12 @@ BUILTIN(__builtin_altivec_vrldnm, "V2ULLiV2ULLiV2ULLi", "")
 BUILTIN(__builtin_altivec_vpdepd, "V2ULLiV2ULLiV2ULLi", "")
 BUILTIN(__builtin_altivec_vpextd, "V2ULLiV2ULLiV2ULLi", "")
 
+// P10 Vector Centrifuge built-in.
+BUILTIN(__builtin_altivec_vcfuged, "V2ULLiV2ULLiV2ULLi", "")
+
+// P10 Vector Gather Every N-th Bit built-in.
+BUILTIN(__builtin_altivec_vgnb, "ULLiV1ULLLiIi", "")
+
 // P10 Vector Clear Bytes built-ins.
 BUILTIN(__builtin_altivec_vclrlb, "V16cV16cUi", "")
 BUILTIN(__builtin_altivec_vclrrb, "V16cV16cUi", "")
@@ -439,6 +445,8 @@ BUILTIN(__builtin_vsx_extractuword, "V2ULLiV16UcIi", "")
 BUILTIN(__builtin_vsx_xxpermdi, "v.", "t")
 BUILTIN(__builtin_vsx_xxsldwi, "v.", "t")
 
+BUILTIN(__builtin_vsx_xxeval, "V2ULLiV2ULLiV2ULLiV2ULLiIi", "")
+
 // Float 128 built-ins
 BUILTIN(__builtin_sqrtf128_round_to_odd, "LLdLLd", "")
 BUILTIN(__builtin_addf128_round_to_odd, "LLdLLdLLd", "")
@@ -489,6 +497,7 @@ BUILTIN(__builtin_divdeu, "ULLiULLiULLi", "")
 BUILTIN(__builtin_bpermd, "SLLiSLLiSLLi", "")
 BUILTIN(__builtin_pdepd, "ULLiULLiULLi", "")
 BUILTIN(__builtin_pextd, "ULLiULLiULLi", "")
+BUILTIN(__builtin_cfuged, "ULLiULLiULLi", "")
 BUILTIN(__builtin_cntlzdm, "ULLiULLiULLi", "")
 BUILTIN(__builtin_cnttzdm, "ULLiULLiULLi", "")
 

diff --git a/clang/lib/Headers/altivec.h b/clang/lib/Headers/altivec.h
index f9fd3e2e50eb..91279119630d 100644
--- a/clang/lib/Headers/altivec.h
+++ b/clang/lib/Headers/altivec.h
@@ -16777,6 +16777,42 @@ vec_pext(vector unsigned long long __a, vector unsigned long long __b) {
   return __builtin_altivec_vpextd(__a, __b);
 }
 
+/* vec_cfuge */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_cfuge(vector unsigned long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_vcfuged(__a, __b);
+}
+
+/* vec_gnb */
+
+#define vec_gnb(__a, __b) __builtin_altivec_vgnb(__a, __b)
+
+/* vec_ternarylogic */
+#ifdef __VSX__
+#define vec_ternarylogic(__a, __b, __c, __imm)                                 \
+  _Generic((__a), vector unsigned char                                         \
+           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
+                                  (vector unsigned long long)(__b),            \
+                                  (vector unsigned long long)(__c), (__imm)),  \
+             vector unsigned short                                             \
+           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
+                                  (vector unsigned long long)(__b),            \
+                                  (vector unsigned long long)(__c), (__imm)),  \
+             vector unsigned int                                               \
+           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
+                                  (vector unsigned long long)(__b),            \
+                                  (vector unsigned long long)(__c), (__imm)),  \
+             vector unsigned long long                                         \
+           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
+                                  (vector unsigned long long)(__b),            \
+                                  (vector unsigned long long)(__c), (__imm)),  \
+             vector unsigned __int128                                          \
+           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
+                                  (vector unsigned long long)(__b),            \
+                                  (vector unsigned long long)(__c), (__imm)))
+#endif /* __VSX__ */
+
 /* vec_genpcvm */
 
 #ifdef __VSX__
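
The vec_ternarylogic macro above uses _Generic purely for type dispatch: every element type is funneled into the single __builtin_vsx_xxeval builtin, which takes and returns vector unsigned long long, and the result converts back to the operands' type implicitly (the builtins-ppc-p10vector.c tests below return it directly as vector unsigned char/short/int/long long/__int128). A small sketch of what a caller sees, with the same <altivec.h>/-mcpu=pwr10 setup as above and hypothetical names:

  static inline void ternary_demo(vector unsigned char ca, vector unsigned char cb,
                                  vector unsigned char cc, vector unsigned int ia,
                                  vector unsigned int ib, vector unsigned int ic) {
    // Both calls expand to the same __builtin_vsx_xxeval on v2i64 operands;
    // only the type inspected by _Generic differs.
    vector unsigned char rc = vec_ternarylogic(ca, cb, cc, 0x1e);
    vector unsigned int  ri = vec_ternarylogic(ia, ib, ic, 0x1e);
    (void)rc; (void)ri;
  }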

diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 9d0516e0232e..b4554c9fd55a 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -3124,6 +3124,10 @@ bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
            SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
   case PPC::BI__builtin_pack_vector_int128:
     return SemaVSXCheck(TheCall);
+  case PPC::BI__builtin_altivec_vgnb:
+     return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7);
+  case PPC::BI__builtin_vsx_xxeval:
+     return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255);
   }
   return SemaBuiltinConstantArgRange(TheCall, i, l, u);
 }
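
With the two SemaChecking cases added above, the immediates are validated at compile time: the second argument of __builtin_altivec_vgnb must be a constant in [2, 7], and the fourth argument of __builtin_vsx_xxeval a constant in [0, 255]. A sketch of calls that should now be diagnosed (hypothetical names; the exact text is Clang's standard out-of-range diagnostic):

  vector unsigned __int128 x;
  vector unsigned long long a, b, c;

  void rejected(void) {
    unsigned long long g = vec_gnb(x, 1);                         // 1 is outside [2, 7]
    vector unsigned long long t = vec_ternarylogic(a, b, c, 256); // 256 is outside [0, 255]
    (void)g; (void)t;
  }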

diff --git a/clang/test/CodeGen/builtins-ppc-p10.c b/clang/test/CodeGen/builtins-ppc-p10.c
index 5776dfae66dc..38c8988a3e37 100644
--- a/clang/test/CodeGen/builtins-ppc-p10.c
+++ b/clang/test/CodeGen/builtins-ppc-p10.c
@@ -14,6 +14,11 @@ unsigned long long test_pextd(void) {
   return __builtin_pextd(ulla, ullb);
 }
 
+unsigned long long test_cfuged(void) {
+  // CHECK: @llvm.ppc.cfuged
+  return __builtin_cfuged(ulla, ullb);
+}
+
 unsigned long long test_cntlzdm(void) {
   // CHECK: @llvm.ppc.cntlzdm
   return __builtin_cntlzdm(ulla, ullb);

diff --git a/clang/test/CodeGen/builtins-ppc-p10vector.c b/clang/test/CodeGen/builtins-ppc-p10vector.c
index 42c0ed917801..f7930667af79 100644
--- a/clang/test/CodeGen/builtins-ppc-p10vector.c
+++ b/clang/test/CodeGen/builtins-ppc-p10vector.c
@@ -6,10 +6,11 @@
 #include <altivec.h>
 
 vector signed char vsca;
-vector unsigned char vuca;
-vector unsigned short vusa;
-vector unsigned int vuia;
-vector unsigned long long vulla, vullb;
+vector unsigned char vuca, vucb, vucc;
+vector unsigned short vusa, vusb, vusc;
+vector unsigned int vuia, vuib, vuic;
+vector unsigned long long vulla, vullb, vullc;
+vector unsigned __int128 vui128a, vui128b, vui128c;
 unsigned int uia;
 
 vector unsigned long long test_vpdepd(void) {
@@ -24,6 +25,60 @@ vector unsigned long long test_vpextd(void) {
   return vec_pext(vulla, vullb);
 }
 
+vector unsigned long long test_vcfuged(void) {
+  // CHECK: @llvm.ppc.altivec.vcfuged(<2 x i64>
+  // CHECK-NEXT: ret <2 x i64>
+  return vec_cfuge(vulla, vullb);
+}
+
+unsigned long long test_vgnb_1(void) {
+  // CHECK: @llvm.ppc.altivec.vgnb(<1 x i128> %{{.+}}, i32 2)
+  // CHECK-NEXT: ret i64
+  return vec_gnb(vui128a, 2);
+}
+
+unsigned long long test_vgnb_2(void) {
+  // CHECK: @llvm.ppc.altivec.vgnb(<1 x i128> %{{.+}}, i32 7)
+  // CHECK-NEXT: ret i64
+  return vec_gnb(vui128a, 7);
+}
+
+unsigned long long test_vgnb_3(void) {
+  // CHECK: @llvm.ppc.altivec.vgnb(<1 x i128> %{{.+}}, i32 5)
+  // CHECK-NEXT: ret i64
+  return vec_gnb(vui128a, 5);
+}
+
+vector unsigned char test_xxeval_uc(void) {
+  // CHECK: @llvm.ppc.vsx.xxeval(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32 0)
+  // CHECK: ret <16 x i8>
+  return vec_ternarylogic(vuca, vucb, vucc, 0);
+}
+
+vector unsigned short test_xxeval_us(void) {
+  // CHECK: @llvm.ppc.vsx.xxeval(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32 255)
+  // CHECK: ret <8 x i16>
+  return vec_ternarylogic(vusa, vusb, vusc, 255);
+}
+
+vector unsigned int test_xxeval_ui(void) {
+  // CHECK: @llvm.ppc.vsx.xxeval(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32 150)
+  // CHECK: ret <4 x i32>
+  return vec_ternarylogic(vuia, vuib, vuic, 150);
+}
+
+vector unsigned long long test_xxeval_ull(void) {
+  // CHECK: @llvm.ppc.vsx.xxeval(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32 1)
+  // CHECK: ret <2 x i64>
+  return vec_ternarylogic(vulla, vullb, vullc, 1);
+}
+
+vector unsigned __int128 test_xxeval_ui128(void) {
+  // CHECK: @llvm.ppc.vsx.xxeval(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32 246)
+  // CHECK: ret <1 x i128>
+  return vec_ternarylogic(vui128a, vui128b, vui128c, 246);
+}
+
 vector unsigned char test_xxgenpcvbm(void) {
   // CHECK: @llvm.ppc.vsx.xxgenpcvbm(<16 x i8> %{{.+}}, i32
   // CHECK-NEXT: ret <16 x i8>

diff --git a/llvm/include/llvm/IR/IntrinsicsPowerPC.td b/llvm/include/llvm/IR/IntrinsicsPowerPC.td
index 0f8521e5b6c5..cfd3b0b7566d 100644
--- a/llvm/include/llvm/IR/IntrinsicsPowerPC.td
+++ b/llvm/include/llvm/IR/IntrinsicsPowerPC.td
@@ -68,6 +68,11 @@ let TargetPrefix = "ppc" in {  // All intrinsics start with "llvm.ppc.".
       : GCCBuiltin<"__builtin_pextd">,
         Intrinsic <[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
 
+  // Centrifuge Doubleword Builtin.
+  def int_ppc_cfuged
+      : GCCBuiltin<"__builtin_cfuged">,
+        Intrinsic <[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
+
   // Count Leading / Trailing Zeroes under bit Mask Builtins.
   def int_ppc_cntlzdm
       : GCCBuiltin<"__builtin_cntlzdm">,
@@ -426,6 +431,16 @@ let TargetPrefix = "ppc" in {  // All intrinsics start with "llvm.ppc.".
               Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                         [IntrNoMem]>;
 
+  // P10 Vector Centrifuge Builtin.
+  def int_ppc_altivec_vcfuged : GCCBuiltin<"__builtin_altivec_vcfuged">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+                        [IntrNoMem]>;
+
+  // P10 Vector Gather Every Nth Bit Builtin.
+  def int_ppc_altivec_vgnb : GCCBuiltin<"__builtin_altivec_vgnb">,
+              Intrinsic<[llvm_i64_ty], [llvm_v1i128_ty, llvm_i32_ty],
+                        [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
    // P10 Vector Clear Bytes
    def int_ppc_altivec_vclrlb :  GCCBuiltin<"__builtin_altivec_vclrlb">,
                Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty],
@@ -969,6 +984,11 @@ def int_ppc_vsx_xxinsertw :
       PowerPC_VSX_Intrinsic<"xxinsertw",[llvm_v4i32_ty],
                             [llvm_v4i32_ty,llvm_v2i64_ty,llvm_i32_ty],
                             [IntrNoMem]>;
+def int_ppc_vsx_xxeval :
+      PowerPC_VSX_Intrinsic<"xxeval", [llvm_v2i64_ty],
+                           [llvm_v2i64_ty, llvm_v2i64_ty,
+                            llvm_v2i64_ty, llvm_i32_ty],
+                           [IntrNoMem, ImmArg<ArgIndex<3>>]>;
 def int_ppc_vsx_xxgenpcvbm :
       PowerPC_VSX_Intrinsic<"xxgenpcvbm", [llvm_v16i8_ty],
                             [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;

diff --git a/llvm/lib/Target/PowerPC/PPCInstrPrefix.td b/llvm/lib/Target/PowerPC/PPCInstrPrefix.td
index 381da1b87c36..7bec4fdbb8a6 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrPrefix.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrPrefix.td
@@ -177,6 +177,54 @@ class XForm_XT6_IMM5_VB5<bits<6> opcode, bits<10> xo, dag OOL, dag IOL,
   let Inst{31} = XT{5};
 }
 
+class 8RR_XX4Form_IMM8_XTAB6<bits<6> opcode, bits<2> xo,
+                             dag OOL, dag IOL, string asmstr,
+                             InstrItinClass itin, list<dag> pattern>
+  : PI<1, opcode, OOL, IOL, asmstr, itin> {
+    bits<6> XT;
+    bits<6> XA;
+    bits<6> XB;
+    bits<6> XC;
+    bits<8> IMM;
+
+    let Pattern = pattern;
+
+    // The prefix.
+    let Inst{6-7} = 1;
+    let Inst{8} = 0;
+    let Inst{9-11} = 0;
+    let Inst{12-13} = 0;
+    let Inst{14-23} = 0;
+    let Inst{24-31} = IMM;
+
+    // The instruction.
+    let Inst{38-42} = XT{4-0};
+    let Inst{43-47} = XA{4-0};
+    let Inst{48-52} = XB{4-0};
+    let Inst{53-57} = XC{4-0};
+    let Inst{58-59} = xo;
+    let Inst{60} = XC{5};
+    let Inst{61} = XA{5};
+    let Inst{62} = XB{5};
+    let Inst{63} = XT{5};
+}
+
+class VXForm_RD5_N3_VB5<bits<11> xo, dag OOL, dag IOL, string asmstr,
+                        InstrItinClass itin, list<dag> pattern>
+  : I<4, OOL, IOL, asmstr, itin> {
+  bits<5> RD;
+  bits<5> VB;
+  bits<3> N;
+
+  let Pattern = pattern;
+
+  let Inst{6-10}  = RD;
+  let Inst{11-12} = 0;
+  let Inst{13-15} = N;
+  let Inst{16-20} = VB;
+  let Inst{21-31} = xo;
+}
+
 multiclass MLS_DForm_R_SI34_RTA5_MEM_p<bits<6> opcode, dag OOL, dag IOL,
                                        dag PCRel_IOL, string asmstr,
                                        InstrItinClass itin> {
@@ -532,6 +580,23 @@ let Predicates = [IsISA3_1] in {
    def PEXTD : XForm_6<31, 188, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB),
                        "pextd $rA, $rS, $rB", IIC_IntGeneral,
                        [(set i64:$rA, (int_ppc_pextd i64:$rS, i64:$rB))]>;
+   def VCFUGED : VXForm_1<1357, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
+                          "vcfuged $vD, $vA, $vB", IIC_VecGeneral,
+                          [(set v2i64:$vD,
+                          (int_ppc_altivec_vcfuged v2i64:$vA, v2i64:$vB))]>;
+   def VGNB : VXForm_RD5_N3_VB5<1228, (outs g8rc:$rD), (ins vrrc:$vB, u3imm:$N),
+                                "vgnb $rD, $vB, $N", IIC_VecGeneral,
+                                [(set i64:$rD,
+                                (int_ppc_altivec_vgnb v1i128:$vB, timm:$N))]>;
+   def CFUGED : XForm_6<31, 220, (outs g8rc:$rA), (ins g8rc:$rS, g8rc:$rB),
+                        "cfuged $rA, $rS, $rB", IIC_IntGeneral,
+                        [(set i64:$rA, (int_ppc_cfuged i64:$rS, i64:$rB))]>;
+   def XXEVAL :
+     8RR_XX4Form_IMM8_XTAB6<34, 1, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB,
+                            vsrc:$XC, u8imm:$IMM),
+                            "xxeval $XT, $XA, $XB, $XC, $IMM", IIC_VecGeneral,
+                            [(set v2i64:$XT, (int_ppc_vsx_xxeval v2i64:$XA,
+                                  v2i64:$XB, v2i64:$XC, timm:$IMM))]>;
    def VCLZDM : VXForm_1<1924, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                          "vclzdm $vD, $vA, $vB", IIC_VecGeneral,
                          [(set v2i64:$vD,

diff --git a/llvm/test/CodeGen/PowerPC/p10-bit-manip-ops.ll b/llvm/test/CodeGen/PowerPC/p10-bit-manip-ops.ll
index d2a00b048446..901b449bb5c3 100644
--- a/llvm/test/CodeGen/PowerPC/p10-bit-manip-ops.ll
+++ b/llvm/test/CodeGen/PowerPC/p10-bit-manip-ops.ll
@@ -9,6 +9,10 @@ declare <2 x i64> @llvm.ppc.altivec.vpdepd(<2 x i64>, <2 x i64>)
 declare <2 x i64> @llvm.ppc.altivec.vpextd(<2 x i64>, <2 x i64>)
 declare i64 @llvm.ppc.pdepd(i64, i64)
 declare i64 @llvm.ppc.pextd(i64, i64)
+declare <2 x i64> @llvm.ppc.altivec.vcfuged(<2 x i64>, <2 x i64>)
+declare i64 @llvm.ppc.cfuged(i64, i64)
+declare i64 @llvm.ppc.altivec.vgnb(<1 x i128>, i32)
+declare <2 x i64> @llvm.ppc.vsx.xxeval(<2 x i64>, <2 x i64>, <2 x i64>, i32)
 declare <2 x i64> @llvm.ppc.altivec.vclzdm(<2 x i64>, <2 x i64>)
 declare <2 x i64> @llvm.ppc.altivec.vctzdm(<2 x i64>, <2 x i64>)
 declare i64 @llvm.ppc.cntlzdm(i64, i64)
@@ -54,6 +58,66 @@ entry:
   ret i64 %tmp
 }
 
+define <2 x i64> @test_vcfuged(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vcfuged:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vcfuged v2, v2, v3
+; CHECK-NEXT:    blr
+entry:
+  %tmp = tail call <2 x i64> @llvm.ppc.altivec.vcfuged(<2 x i64> %a, <2 x i64> %b)
+  ret <2 x i64> %tmp
+}
+
+define i64 @test_cfuged(i64 %a, i64 %b) {
+; CHECK-LABEL: test_cfuged:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    cfuged r3, r3, r4
+; CHECK-NEXT:    blr
+entry:
+  %tmp = tail call i64 @llvm.ppc.cfuged(i64 %a, i64 %b)
+  ret i64 %tmp
+}
+
+define i64 @test_vgnb_1(<1 x i128> %a) {
+; CHECK-LABEL: test_vgnb_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vgnb r3, v2, 2
+; CHECK-NEXT:    blr
+entry:
+  %tmp = tail call i64 @llvm.ppc.altivec.vgnb(<1 x i128> %a, i32 2)
+  ret i64 %tmp
+}
+
+define i64 @test_vgnb_2(<1 x i128> %a) {
+; CHECK-LABEL: test_vgnb_2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vgnb r3, v2, 7
+; CHECK-NEXT:    blr
+entry:
+  %tmp = tail call i64 @llvm.ppc.altivec.vgnb(<1 x i128> %a, i32 7)
+  ret i64 %tmp
+}
+
+define i64 @test_vgnb_3(<1 x i128> %a) {
+; CHECK-LABEL: test_vgnb_3:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vgnb r3, v2, 5
+; CHECK-NEXT:    blr
+entry:
+  %tmp = tail call i64 @llvm.ppc.altivec.vgnb(<1 x i128> %a, i32 5)
+  ret i64 %tmp
+}
+
+define <2 x i64> @test_xxeval(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) {
+; CHECK-LABEL: test_xxeval:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xxeval v2, v2, v3, v4, 255
+; CHECK-NEXT:    blr
+entry:
+  %tmp = tail call <2 x i64> @llvm.ppc.vsx.xxeval(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, i32 255)
+  ret <2 x i64> %tmp
+}
+
 define <2 x i64> @test_vclzdm(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-LABEL: test_vclzdm:
 ; CHECK:       # %bb.0: # %entry

diff --git a/llvm/test/MC/Disassembler/PowerPC/p10insts.txt b/llvm/test/MC/Disassembler/PowerPC/p10insts.txt
index a263004413da..5e690fe1c69f 100644
--- a/llvm/test/MC/Disassembler/PowerPC/p10insts.txt
+++ b/llvm/test/MC/Disassembler/PowerPC/p10insts.txt
@@ -13,6 +13,18 @@
 # CHECK: pextd 1, 2, 4
 0x7c 0x41 0x21 0x78
 
+# CHECK: vcfuged 1, 2, 4
+0x10 0x22 0x25 0x4d
+
+# CHECK: cfuged 1, 2, 4
+0x7c 0x41 0x21 0xb8
+
+# CHECK: vgnb 1, 2, 2
+0x10 0x22 0x14 0xcc
+
+# CHECK: xxeval 32, 1, 2, 3, 2
+0x05 0x00 0x00 0x02 0x88 0x01 0x10 0xd1
+
 # CHECK: vclzdm 1, 2, 3
 0x10 0x22 0x1f 0x84
 

diff --git a/llvm/test/MC/PowerPC/p10.s b/llvm/test/MC/PowerPC/p10.s
index ec2afd8c5062..e0fbf58b91af 100644
--- a/llvm/test/MC/PowerPC/p10.s
+++ b/llvm/test/MC/PowerPC/p10.s
@@ -15,6 +15,20 @@
 # CHECK-BE: pextd 1, 2, 4                         # encoding: [0x7c,0x41,0x21,0x78]
 # CHECK-LE: pextd 1, 2, 4                         # encoding: [0x78,0x21,0x41,0x7c]
             pextd 1, 2, 4
+# CHECK-BE: vcfuged 1, 2, 4                       # encoding: [0x10,0x22,0x25,0x4d]
+# CHECK-LE: vcfuged 1, 2, 4                       # encoding: [0x4d,0x25,0x22,0x10]
+            vcfuged 1, 2, 4
+# CHECK-BE: cfuged 1, 2, 4                        # encoding: [0x7c,0x41,0x21,0xb8]
+# CHECK-LE: cfuged 1, 2, 4                        # encoding: [0xb8,0x21,0x41,0x7c]
+            cfuged 1, 2, 4
+# CHECK-BE: vgnb 1, 2, 2                          # encoding: [0x10,0x22,0x14,0xcc]
+# CHECK-LE: vgnb 1, 2, 2                          # encoding: [0xcc,0x14,0x22,0x10]
+            vgnb 1, 2, 2
+# CHECK-BE: xxeval 32, 1, 2, 3, 2                 # encoding: [0x05,0x00,0x00,0x02,
+# CHECK-BE-SAME:                                               0x88,0x01,0x10,0xd1]
+# CHECK-LE: xxeval 32, 1, 2, 3, 2                 # encoding: [0x02,0x00,0x00,0x05,
+# CHECK-LE-SAME:                                               0xd1,0x10,0x01,0x88]
+            xxeval 32, 1, 2, 3, 2
 # CHECK-BE: vclzdm 1, 2, 3                        # encoding: [0x10,0x22,0x1f,0x84]
 # CHECK-LE: vclzdm 1, 2, 3                        # encoding: [0x84,0x1f,0x22,0x10]
             vclzdm 1, 2, 3
