[llvm-branch-commits] [llvm] aefedb1 - [VE] Add logical mask intrinsic instructions

Kazushi Marukawa via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Mon Dec 14 08:39:20 PST 2020


Author: Kazushi (Jam) Marukawa
Date: 2020-12-15T01:34:31+09:00
New Revision: aefedb170734d680516c3875873c80fc29498b43

URL: https://github.com/llvm/llvm-project/commit/aefedb170734d680516c3875873c80fc29498b43
DIFF: https://github.com/llvm/llvm-project/commit/aefedb170734d680516c3875873c80fc29498b43.diff

LOG: [VE] Add logical mask intrinsic instructions

Add andm, orm, xorm, eqvm, nndm, negm, pcvm, lzvm, and tovm intrinsic
instructions, a few pseudo instructions used to expand the logical
intrinsics that operate on VM512, a mechanism to expand such pseudo
instructions, and regression tests.  Also, assign vector mask types and
vector mask register classes correctly.  This is required to pass VM512
registers as function arguments.
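
A minimal IR sketch, written in the same style as the regression tests
added below (the function name is illustrative, not part of the patch):
a v512i1 mask argument can only be passed once the type is mapped to the
VM512 register class, and the MMM intrinsic then selects the ANDMyy
pseudo, which is split after register allocation into two ANDMmm
instructions on the upper/lower VM halves.

    ; Sketch: v512i1 masks as function arguments (mirrors andm.ll below).
    define fastcc <512 x i1> @mask_and(<512 x i1> %a, <512 x i1> %b) {
      %r = tail call <512 x i1> @llvm.ve.vl.andm.MMM(<512 x i1> %a, <512 x i1> %b)
      ret <512 x i1> %r
    }
    declare <512 x i1> @llvm.ve.vl.andm.MMM(<512 x i1>, <512 x i1>)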

Reviewed By: simoll

Differential Revision: https://reviews.llvm.org/D93093

Added: 
    llvm/test/CodeGen/VE/VELIntrinsics/andm.ll
    llvm/test/CodeGen/VE/VELIntrinsics/eqvm.ll
    llvm/test/CodeGen/VE/VELIntrinsics/lzvm.ll
    llvm/test/CodeGen/VE/VELIntrinsics/negm.ll
    llvm/test/CodeGen/VE/VELIntrinsics/nndm.ll
    llvm/test/CodeGen/VE/VELIntrinsics/orm.ll
    llvm/test/CodeGen/VE/VELIntrinsics/pcvm.ll
    llvm/test/CodeGen/VE/VELIntrinsics/tovm.ll
    llvm/test/CodeGen/VE/VELIntrinsics/xorm.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsVEVL.gen.td
    llvm/lib/Target/VE/VEInstrInfo.cpp
    llvm/lib/Target/VE/VEInstrIntrinsicVL.gen.td
    llvm/lib/Target/VE/VEInstrVec.td

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsVEVL.gen.td b/llvm/include/llvm/IR/IntrinsicsVEVL.gen.td
index c22fecafb39d..67cbd307903d 100644
--- a/llvm/include/llvm/IR/IntrinsicsVEVL.gen.td
+++ b/llvm/include/llvm/IR/IntrinsicsVEVL.gen.td
@@ -1196,3 +1196,18 @@ let TargetPrefix = "ve" in def int_ve_vl_vsclot_vvssl : GCCBuiltin<"__builtin_ve
 let TargetPrefix = "ve" in def int_ve_vl_vsclot_vvssml : GCCBuiltin<"__builtin_ve_vl_vsclot_vvssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
 let TargetPrefix = "ve" in def int_ve_vl_vsclncot_vvssl : GCCBuiltin<"__builtin_ve_vl_vsclncot_vvssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrWriteMem]>;
 let TargetPrefix = "ve" in def int_ve_vl_vsclncot_vvssml : GCCBuiltin<"__builtin_ve_vl_vsclncot_vvssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_andm_mmm : GCCBuiltin<"__builtin_ve_vl_andm_mmm">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256i1>, LLVMType<v256i1>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_andm_MMM : GCCBuiltin<"__builtin_ve_vl_andm_MMM">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v512i1>, LLVMType<v512i1>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_orm_mmm : GCCBuiltin<"__builtin_ve_vl_orm_mmm">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256i1>, LLVMType<v256i1>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_orm_MMM : GCCBuiltin<"__builtin_ve_vl_orm_MMM">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v512i1>, LLVMType<v512i1>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_xorm_mmm : GCCBuiltin<"__builtin_ve_vl_xorm_mmm">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256i1>, LLVMType<v256i1>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_xorm_MMM : GCCBuiltin<"__builtin_ve_vl_xorm_MMM">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v512i1>, LLVMType<v512i1>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_eqvm_mmm : GCCBuiltin<"__builtin_ve_vl_eqvm_mmm">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256i1>, LLVMType<v256i1>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_eqvm_MMM : GCCBuiltin<"__builtin_ve_vl_eqvm_MMM">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v512i1>, LLVMType<v512i1>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_nndm_mmm : GCCBuiltin<"__builtin_ve_vl_nndm_mmm">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256i1>, LLVMType<v256i1>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_nndm_MMM : GCCBuiltin<"__builtin_ve_vl_nndm_MMM">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v512i1>, LLVMType<v512i1>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_negm_mm : GCCBuiltin<"__builtin_ve_vl_negm_mm">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256i1>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_negm_MM : GCCBuiltin<"__builtin_ve_vl_negm_MM">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v512i1>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pcvm_sml : GCCBuiltin<"__builtin_ve_vl_pcvm_sml">, Intrinsic<[LLVMType<i64>], [LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_lzvm_sml : GCCBuiltin<"__builtin_ve_vl_lzvm_sml">, Intrinsic<[LLVMType<i64>], [LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_tovm_sml : GCCBuiltin<"__builtin_ve_vl_tovm_sml">, Intrinsic<[LLVMType<i64>], [LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;

diff --git a/llvm/lib/Target/VE/VEInstrInfo.cpp b/llvm/lib/Target/VE/VEInstrInfo.cpp
index 8b56336008a6..530c5d655931 100644
--- a/llvm/lib/Target/VE/VEInstrInfo.cpp
+++ b/llvm/lib/Target/VE/VEInstrInfo.cpp
@@ -731,6 +731,32 @@ static Register getVM512Upper(Register reg) {
 
 static Register getVM512Lower(Register reg) { return getVM512Upper(reg) + 1; }
 
+// Expand pseudo logical vector instructions for VM512 registers.
+static void expandPseudoLogM(MachineInstr &MI, const MCInstrDesc &MCID) {
+  MachineBasicBlock *MBB = MI.getParent();
+  DebugLoc DL = MI.getDebugLoc();
+
+  Register VMXu = getVM512Upper(MI.getOperand(0).getReg());
+  Register VMXl = getVM512Lower(MI.getOperand(0).getReg());
+  Register VMYu = getVM512Upper(MI.getOperand(1).getReg());
+  Register VMYl = getVM512Lower(MI.getOperand(1).getReg());
+
+  switch (MI.getOpcode()) {
+  default: {
+    Register VMZu = getVM512Upper(MI.getOperand(2).getReg());
+    Register VMZl = getVM512Lower(MI.getOperand(2).getReg());
+    BuildMI(*MBB, MI, DL, MCID).addDef(VMXu).addUse(VMYu).addUse(VMZu);
+    BuildMI(*MBB, MI, DL, MCID).addDef(VMXl).addUse(VMYl).addUse(VMZl);
+    break;
+  }
+  case VE::NEGMy:
+    BuildMI(*MBB, MI, DL, MCID).addDef(VMXu).addUse(VMYu);
+    BuildMI(*MBB, MI, DL, MCID).addDef(VMXl).addUse(VMYl);
+    break;
+  }
+  MI.eraseFromParent();
+}
+
 static void addOperandsForVFMK(MachineInstrBuilder &MIB, MachineInstr &MI,
                                bool Upper) {
   // VM512
@@ -812,6 +838,25 @@ bool VEInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
     return expandGetStackTopPseudo(MI);
   }
 
+  case VE::ANDMyy:
+    expandPseudoLogM(MI, get(VE::ANDMmm));
+    return true;
+  case VE::ORMyy:
+    expandPseudoLogM(MI, get(VE::ORMmm));
+    return true;
+  case VE::XORMyy:
+    expandPseudoLogM(MI, get(VE::XORMmm));
+    return true;
+  case VE::EQVMyy:
+    expandPseudoLogM(MI, get(VE::EQVMmm));
+    return true;
+  case VE::NNDMyy:
+    expandPseudoLogM(MI, get(VE::NNDMmm));
+    return true;
+  case VE::NEGMy:
+    expandPseudoLogM(MI, get(VE::NEGMm));
+    return true;
+
   case VE::LVMyir:
   case VE::LVMyim:
   case VE::LVMyir_y:

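To see both paths of the switch in expandPseudoLogM at once, here is a
hedged IR sketch (not part of the patch; @mask_nand is a hypothetical
name): the ANDMyy pseudo hits the default (binary) case and the NEGMy
pseudo hits the dedicated unary case, so after expandPostRAPseudo this
function should lower to two andm and two negm instructions, one per
VM512 half.

    ; Sketch: chains a binary and a unary VM512 mask intrinsic.
    define fastcc <512 x i1> @mask_nand(<512 x i1> %a, <512 x i1> %b) {
      ; Selected as ANDMyy; expanded into two ANDMmm (default case).
      %t = tail call <512 x i1> @llvm.ve.vl.andm.MMM(<512 x i1> %a, <512 x i1> %b)
      ; Selected as NEGMy; expanded into two NEGMm (VE::NEGMy case).
      %r = tail call <512 x i1> @llvm.ve.vl.negm.MM(<512 x i1> %t)
      ret <512 x i1> %r
    }
    declare <512 x i1> @llvm.ve.vl.andm.MMM(<512 x i1>, <512 x i1>)
    declare <512 x i1> @llvm.ve.vl.negm.MM(<512 x i1>)
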
diff --git a/llvm/lib/Target/VE/VEInstrIntrinsicVL.gen.td b/llvm/lib/Target/VE/VEInstrIntrinsicVL.gen.td
index dbd173ef3690..9ec10838db05 100644
--- a/llvm/lib/Target/VE/VEInstrIntrinsicVL.gen.td
+++ b/llvm/lib/Target/VE/VEInstrIntrinsicVL.gen.td
@@ -1587,3 +1587,18 @@ def : Pat<(int_ve_vl_vsclncot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz,
 def : Pat<(int_ve_vl_vsclncot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, zero:$Z, v256i1:$vm, i32:$vl), (VSCLNCOTvrzvml v256f64:$vy, i64:$sy, (LO7 $Z), v256f64:$vx, v256i1:$vm, i32:$vl)>;
 def : Pat<(int_ve_vl_vsclncot_vvssml v256f64:$vx, v256f64:$vy, simm7:$I, i64:$sz, v256i1:$vm, i32:$vl), (VSCLNCOTvirvml v256f64:$vy, (LO7 $I), i64:$sz, v256f64:$vx, v256i1:$vm, i32:$vl)>;
 def : Pat<(int_ve_vl_vsclncot_vvssml v256f64:$vx, v256f64:$vy, simm7:$I, zero:$Z, v256i1:$vm, i32:$vl), (VSCLNCOTvizvml v256f64:$vy, (LO7 $I), (LO7 $Z), v256f64:$vx, v256i1:$vm, i32:$vl)>;
+def : Pat<(int_ve_vl_andm_mmm v256i1:$vmy, v256i1:$vmz), (ANDMmm v256i1:$vmy, v256i1:$vmz)>;
+def : Pat<(int_ve_vl_andm_MMM v512i1:$vmy, v512i1:$vmz), (ANDMyy v512i1:$vmy, v512i1:$vmz)>;
+def : Pat<(int_ve_vl_orm_mmm v256i1:$vmy, v256i1:$vmz), (ORMmm v256i1:$vmy, v256i1:$vmz)>;
+def : Pat<(int_ve_vl_orm_MMM v512i1:$vmy, v512i1:$vmz), (ORMyy v512i1:$vmy, v512i1:$vmz)>;
+def : Pat<(int_ve_vl_xorm_mmm v256i1:$vmy, v256i1:$vmz), (XORMmm v256i1:$vmy, v256i1:$vmz)>;
+def : Pat<(int_ve_vl_xorm_MMM v512i1:$vmy, v512i1:$vmz), (XORMyy v512i1:$vmy, v512i1:$vmz)>;
+def : Pat<(int_ve_vl_eqvm_mmm v256i1:$vmy, v256i1:$vmz), (EQVMmm v256i1:$vmy, v256i1:$vmz)>;
+def : Pat<(int_ve_vl_eqvm_MMM v512i1:$vmy, v512i1:$vmz), (EQVMyy v512i1:$vmy, v512i1:$vmz)>;
+def : Pat<(int_ve_vl_nndm_mmm v256i1:$vmy, v256i1:$vmz), (NNDMmm v256i1:$vmy, v256i1:$vmz)>;
+def : Pat<(int_ve_vl_nndm_MMM v512i1:$vmy, v512i1:$vmz), (NNDMyy v512i1:$vmy, v512i1:$vmz)>;
+def : Pat<(int_ve_vl_negm_mm v256i1:$vmy), (NEGMm v256i1:$vmy)>;
+def : Pat<(int_ve_vl_negm_MM v512i1:$vmy), (NEGMy v512i1:$vmy)>;
+def : Pat<(int_ve_vl_pcvm_sml v256i1:$vmy, i32:$vl), (PCVMml v256i1:$vmy, i32:$vl)>;
+def : Pat<(int_ve_vl_lzvm_sml v256i1:$vmy, i32:$vl), (LZVMml v256i1:$vmy, i32:$vl)>;
+def : Pat<(int_ve_vl_tovm_sml v256i1:$vmy, i32:$vl), (TOVMml v256i1:$vmy, i32:$vl)>;

diff --git a/llvm/lib/Target/VE/VEInstrVec.td b/llvm/lib/Target/VE/VEInstrVec.td
index 886bb12c1df3..4a8476f7288a 100644
--- a/llvm/lib/Target/VE/VEInstrVec.td
+++ b/llvm/lib/Target/VE/VEInstrVec.td
@@ -43,6 +43,22 @@ let hasSideEffects = 0, isCodeGenOnly = 1, DisableEncoding = "$vl" in {
                          "# pseudo-vfmk.s.$cf $vmx, $vz, $vm">;
 }
 
+// ANDM/ORM/XORM/EQVM/NNDM/NEGM instructions using VM512
+let hasSideEffects = 0, isCodeGenOnly = 1 in {
+  def ANDMyy : Pseudo<(outs VM512:$vmx), (ins VM512:$vmy, VM512:$vmz),
+                      "# andm $vmx, $vmy, $vmz">;
+  def ORMyy : Pseudo<(outs VM512:$vmx), (ins VM512:$vmy, VM512:$vmz),
+                     "# orm $vmx, $vmy, $vmz">;
+  def XORMyy : Pseudo<(outs VM512:$vmx), (ins VM512:$vmy, VM512:$vmz),
+                      "# xorm $vmx, $vmy, $vmz">;
+  def EQVMyy : Pseudo<(outs VM512:$vmx), (ins VM512:$vmy, VM512:$vmz),
+                      "# eqvm $vmx, $vmy, $vmz">;
+  def NNDMyy : Pseudo<(outs VM512:$vmx), (ins VM512:$vmy, VM512:$vmz),
+                      "# nndm $vmx, $vmy, $vmz">;
+  def NEGMy : Pseudo<(outs VM512:$vmx), (ins VM512:$vmy),
+                     "# negm $vmx, $vmy">;
+}
+
 //===----------------------------------------------------------------------===//
 // Instructions
 //

diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/andm.ll b/llvm/test/CodeGen/VE/VELIntrinsics/andm.ll
new file mode 100644
index 000000000000..bce8d21ef4dc
--- /dev/null
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/andm.ll
@@ -0,0 +1,33 @@
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+;;; Test and vm intrinsic instructions
+;;;
+;;; Note:
+;;;   We test ANDM*mm and ANDM*yy instructions.
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x i1> @andm_mmm(<256 x i1> %0, <256 x i1> %1) {
+; CHECK-LABEL: andm_mmm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    andm %vm1, %vm1, %vm2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call <256 x i1> @llvm.ve.vl.andm.mmm(<256 x i1> %0, <256 x i1> %1)
+  ret <256 x i1> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x i1> @llvm.ve.vl.andm.mmm(<256 x i1>, <256 x i1>)
+
+; Function Attrs: nounwind readnone
+define fastcc <512 x i1> @andm_MMM(<512 x i1> %0, <512 x i1> %1) {
+; CHECK-LABEL: andm_MMM:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    andm %vm2, %vm2, %vm4
+; CHECK-NEXT:    andm %vm3, %vm3, %vm5
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call <512 x i1> @llvm.ve.vl.andm.MMM(<512 x i1> %0, <512 x i1> %1)
+  ret <512 x i1> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <512 x i1> @llvm.ve.vl.andm.MMM(<512 x i1>, <512 x i1>)

diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/eqvm.ll b/llvm/test/CodeGen/VE/VELIntrinsics/eqvm.ll
new file mode 100644
index 000000000000..f8f4b3805fda
--- /dev/null
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/eqvm.ll
@@ -0,0 +1,33 @@
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+;;; Test equivalence vm intrinsic instructions
+;;;
+;;; Note:
+;;;   We test EQVM*mm and EQVM*yy instructions.
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x i1> @eqvm_mmm(<256 x i1> %0, <256 x i1> %1) {
+; CHECK-LABEL: eqvm_mmm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    eqvm %vm1, %vm1, %vm2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call <256 x i1> @llvm.ve.vl.eqvm.mmm(<256 x i1> %0, <256 x i1> %1)
+  ret <256 x i1> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x i1> @llvm.ve.vl.eqvm.mmm(<256 x i1>, <256 x i1>)
+
+; Function Attrs: nounwind readnone
+define fastcc <512 x i1> @eqvm_MMM(<512 x i1> %0, <512 x i1> %1) {
+; CHECK-LABEL: eqvm_MMM:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    eqvm %vm2, %vm2, %vm4
+; CHECK-NEXT:    eqvm %vm3, %vm3, %vm5
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call <512 x i1> @llvm.ve.vl.eqvm.MMM(<512 x i1> %0, <512 x i1> %1)
+  ret <512 x i1> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <512 x i1> @llvm.ve.vl.eqvm.MMM(<512 x i1>, <512 x i1>)

diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/lzvm.ll b/llvm/test/CodeGen/VE/VELIntrinsics/lzvm.ll
new file mode 100644
index 000000000000..7146f39f8aef
--- /dev/null
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/lzvm.ll
@@ -0,0 +1,21 @@
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+;;; Test leading zero of vm intrinsic instructions
+;;;
+;;; Note:
+;;;   We test LZVM*ml instruction.
+
+; Function Attrs: nounwind readnone
+define fastcc i64 @lzvm_sml(<256 x i1> %0) {
+; CHECK-LABEL: lzvm_sml:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    lzvm %s0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %2 = tail call i64 @llvm.ve.vl.lzvm.sml(<256 x i1> %0, i32 256)
+  ret i64 %2
+}
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.ve.vl.lzvm.sml(<256 x i1>, i32)

diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/negm.ll b/llvm/test/CodeGen/VE/VELIntrinsics/negm.ll
new file mode 100644
index 000000000000..e2189093b524
--- /dev/null
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/negm.ll
@@ -0,0 +1,33 @@
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+;;; Test negate vm intrinsic instructions
+;;;
+;;; Note:
+;;;   We test NEGM*m and NEGM*y instructions.
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x i1> @negm_mm(<256 x i1> %0) {
+; CHECK-LABEL: negm_mm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    negm %vm1, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %2 = tail call <256 x i1> @llvm.ve.vl.negm.mm(<256 x i1> %0)
+  ret <256 x i1> %2
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x i1> @llvm.ve.vl.negm.mm(<256 x i1>)
+
+; Function Attrs: nounwind readnone
+define fastcc <512 x i1> @negm_MM(<512 x i1> %0) {
+; CHECK-LABEL: negm_MM:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    negm %vm2, %vm2
+; CHECK-NEXT:    negm %vm3, %vm3
+; CHECK-NEXT:    b.l.t (, %s10)
+  %2 = tail call <512 x i1> @llvm.ve.vl.negm.MM(<512 x i1> %0)
+  ret <512 x i1> %2
+}
+
+; Function Attrs: nounwind readnone
+declare <512 x i1> @llvm.ve.vl.negm.MM(<512 x i1>)

diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/nndm.ll b/llvm/test/CodeGen/VE/VELIntrinsics/nndm.ll
new file mode 100644
index 000000000000..ee2dd38d39f2
--- /dev/null
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/nndm.ll
@@ -0,0 +1,33 @@
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+;;; Test negate and vm intrinsic instructions
+;;;
+;;; Note:
+;;;   We test NNDM*mm and NNDM*yy instructions.
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x i1> @nndm_mmm(<256 x i1> %0, <256 x i1> %1) {
+; CHECK-LABEL: nndm_mmm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    nndm %vm1, %vm1, %vm2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call <256 x i1> @llvm.ve.vl.nndm.mmm(<256 x i1> %0, <256 x i1> %1)
+  ret <256 x i1> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x i1> @llvm.ve.vl.nndm.mmm(<256 x i1>, <256 x i1>)
+
+; Function Attrs: nounwind readnone
+define fastcc <512 x i1> @nndm_MMM(<512 x i1> %0, <512 x i1> %1) {
+; CHECK-LABEL: nndm_MMM:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    nndm %vm2, %vm2, %vm4
+; CHECK-NEXT:    nndm %vm3, %vm3, %vm5
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call <512 x i1> @llvm.ve.vl.nndm.MMM(<512 x i1> %0, <512 x i1> %1)
+  ret <512 x i1> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <512 x i1> @llvm.ve.vl.nndm.MMM(<512 x i1>, <512 x i1>)

diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/orm.ll b/llvm/test/CodeGen/VE/VELIntrinsics/orm.ll
new file mode 100644
index 000000000000..ff67c19c96db
--- /dev/null
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/orm.ll
@@ -0,0 +1,33 @@
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+;;; Test or vm intrinsic instructions
+;;;
+;;; Note:
+;;;   We test ORM*mm and ORM*yy instructions.
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x i1> @orm_mmm(<256 x i1> %0, <256 x i1> %1) {
+; CHECK-LABEL: orm_mmm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    orm %vm1, %vm1, %vm2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call <256 x i1> @llvm.ve.vl.orm.mmm(<256 x i1> %0, <256 x i1> %1)
+  ret <256 x i1> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x i1> @llvm.ve.vl.orm.mmm(<256 x i1>, <256 x i1>)
+
+; Function Attrs: nounwind readnone
+define fastcc <512 x i1> @orm_MMM(<512 x i1> %0, <512 x i1> %1) {
+; CHECK-LABEL: orm_MMM:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    orm %vm2, %vm2, %vm4
+; CHECK-NEXT:    orm %vm3, %vm3, %vm5
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call <512 x i1> @llvm.ve.vl.orm.MMM(<512 x i1> %0, <512 x i1> %1)
+  ret <512 x i1> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <512 x i1> @llvm.ve.vl.orm.MMM(<512 x i1>, <512 x i1>)

diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/pcvm.ll b/llvm/test/CodeGen/VE/VELIntrinsics/pcvm.ll
new file mode 100644
index 000000000000..f9e1aee663f4
--- /dev/null
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/pcvm.ll
@@ -0,0 +1,21 @@
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+;;; Test population count of vm intrinsic instructions
+;;;
+;;; Note:
+;;;   We test PCVM*ml instruction.
+
+; Function Attrs: nounwind readnone
+define fastcc i64 @pcvm_sml(<256 x i1> %0) {
+; CHECK-LABEL: pcvm_sml:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    pcvm %s0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %2 = tail call i64 @llvm.ve.vl.pcvm.sml(<256 x i1> %0, i32 256)
+  ret i64 %2
+}
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.ve.vl.pcvm.sml(<256 x i1>, i32)

diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/tovm.ll b/llvm/test/CodeGen/VE/VELIntrinsics/tovm.ll
new file mode 100644
index 000000000000..c003d7505022
--- /dev/null
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/tovm.ll
@@ -0,0 +1,21 @@
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+;;; Test trailing one of vm intrinsic instructions
+;;;
+;;; Note:
+;;;   We test TOVM*ml instruction.
+
+; Function Attrs: nounwind readnone
+define fastcc i64 @tovm_sml(<256 x i1> %0) {
+; CHECK-LABEL: tovm_sml:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    tovm %s0, %vm1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %2 = tail call i64 @llvm.ve.vl.tovm.sml(<256 x i1> %0, i32 256)
+  ret i64 %2
+}
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.ve.vl.tovm.sml(<256 x i1>, i32)

diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/xorm.ll b/llvm/test/CodeGen/VE/VELIntrinsics/xorm.ll
new file mode 100644
index 000000000000..19e5da1bf226
--- /dev/null
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/xorm.ll
@@ -0,0 +1,33 @@
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+;;; Test exclusive or vm intrinsic instructions
+;;;
+;;; Note:
+;;;   We test XORM*mm and XORM*yy instructions.
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x i1> @xorm_mmm(<256 x i1> %0, <256 x i1> %1) {
+; CHECK-LABEL: xorm_mmm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorm %vm1, %vm1, %vm2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call <256 x i1> @llvm.ve.vl.xorm.mmm(<256 x i1> %0, <256 x i1> %1)
+  ret <256 x i1> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x i1> @llvm.ve.vl.xorm.mmm(<256 x i1>, <256 x i1>)
+
+; Function Attrs: nounwind readnone
+define fastcc <512 x i1> @xorm_MMM(<512 x i1> %0, <512 x i1> %1) {
+; CHECK-LABEL: xorm_MMM:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorm %vm2, %vm2, %vm4
+; CHECK-NEXT:    xorm %vm3, %vm3, %vm5
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call <512 x i1> @llvm.ve.vl.xorm.MMM(<512 x i1> %0, <512 x i1> %1)
+  ret <512 x i1> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <512 x i1> @llvm.ve.vl.xorm.MMM(<512 x i1>, <512 x i1>)
