[llvm-branch-commits] [llvm] c3acda0 - [VE] Vector 'and' isel and tests

Simon Moll via llvm-branch-commits <llvm-branch-commits at lists.llvm.org>
Wed Dec 23 04:34:16 PST 2020


Author: Simon Moll
Date: 2020-12-23T13:29:29+01:00
New Revision: c3acda0798f9b10ac3187ad941bbd8af82fb84a1

URL: https://github.com/llvm/llvm-project/commit/c3acda0798f9b10ac3187ad941bbd8af82fb84a1
DIFF: https://github.com/llvm/llvm-project/commit/c3acda0798f9b10ac3187ad941bbd8af82fb84a1.diff

LOG: [VE] Vector 'and' isel and tests

Reviewed By: kaz7

Differential Revision: https://reviews.llvm.org/D93709
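
In short, this patch wires integer vector 'and' through the VVP layer: it adds the VVP_AND node, the vvp_and/c_vvp_and SDNode definitions, and isel patterns so that a plain IR 'and' on 256-element vectors is selected to VAND (i64 elements) or PVANDLO (i32 elements). A minimal sketch of the kind of IR now covered (the function name is illustrative only, not part of the commit):

    define fastcc <256 x i64> @and_example(<256 x i64> %a, <256 x i64> %b) {
      %r = and <256 x i64> %a, %b   ; selected to the VE 'vand' vector instruction
      ret <256 x i64> %r
    }

The added vec_and.ll test below exercises these cases, including widening, promotion, and scalarization for vector types that do not map directly onto the 256-element registers.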

Added: 
    llvm/test/CodeGen/VE/Vector/vec_and.ll

Modified: 
    llvm/lib/Target/VE/VVPInstrInfo.td
    llvm/lib/Target/VE/VVPInstrPatternsVec.td
    llvm/lib/Target/VE/VVPNodes.def

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/VE/VVPInstrInfo.td b/llvm/lib/Target/VE/VVPInstrInfo.td
index 81fbfe03b48f..2c88d5099a7b 100644
--- a/llvm/lib/Target/VE/VVPInstrInfo.td
+++ b/llvm/lib/Target/VE/VVPInstrInfo.td
@@ -40,4 +40,7 @@ class vvp_commutative<SDNode RootOp> :
 def vvp_add    : SDNode<"VEISD::VVP_ADD",  SDTIntBinOpVVP>;
 def c_vvp_add  : vvp_commutative<vvp_add>;
 
+def vvp_and    : SDNode<"VEISD::VVP_AND",  SDTIntBinOpVVP>;
+def c_vvp_and  : vvp_commutative<vvp_and>;
+
 // } Binary Operators

diff --git a/llvm/lib/Target/VE/VVPInstrPatternsVec.td b/llvm/lib/Target/VE/VVPInstrPatternsVec.td
index 2345173314a4..7003fb387670 100644
--- a/llvm/lib/Target/VE/VVPInstrPatternsVec.td
+++ b/llvm/lib/Target/VE/VVPInstrPatternsVec.td
@@ -66,3 +66,6 @@ multiclass VectorBinaryArith_ShortLong<
 defm : VectorBinaryArith_ShortLong<c_vvp_add,
                                    i64, v256i64, "VADDSL",
                                    i32, v256i32, "VADDSWSX">;
+defm : VectorBinaryArith_ShortLong<c_vvp_and,
+                                   i64, v256i64, "VAND",
+                                   i32, v256i32, "PVANDLO">;

diff --git a/llvm/lib/Target/VE/VVPNodes.def b/llvm/lib/Target/VE/VVPNodes.def
index 4319b332388e..1f9cbd790235 100644
--- a/llvm/lib/Target/VE/VVPNodes.def
+++ b/llvm/lib/Target/VE/VVPNodes.def
@@ -27,6 +27,7 @@
 // Integer arithmetic.
 ADD_BINARY_VVP_OP(VVP_ADD,ADD)
 
+ADD_BINARY_VVP_OP(VVP_AND,AND)
 
 #undef ADD_BINARY_VVP_OP
 #undef ADD_VVP_OP

diff --git a/llvm/test/CodeGen/VE/Vector/vec_and.ll b/llvm/test/CodeGen/VE/Vector/vec_and.ll
new file mode 100644
index 000000000000..8597e1aa511e
--- /dev/null
+++ b/llvm/test/CodeGen/VE/Vector/vec_and.ll
@@ -0,0 +1,132 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+; <256 x i32>
+
+; Function Attrs: nounwind
+define fastcc <256 x i32> @and_vv_v256i32(<256 x i32> %x, <256 x i32> %y) {
+; CHECK-LABEL: and_vv_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    pvand.lo %v0, %v0, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = and <256 x i32> %x, %y
+  ret <256 x i32> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i32> @and_sv_v256i32(i32 %x, <256 x i32> %y) {
+; CHECK-LABEL: and_sv_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    pvand.lo %v0, %s0, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x i32> undef, i32 %x, i32 0
+  %vx = shufflevector <256 x i32> %xins, <256 x i32> undef, <256 x i32> zeroinitializer
+  %z = and <256 x i32> %vx, %y
+  ret <256 x i32> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i32> @and_vs_v256i32(<256 x i32> %x, i32 %y) {
+; CHECK-LABEL: and_vs_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    pvand.lo %v0, %s0, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x i32> undef, i32 %y, i32 0
+  %vy = shufflevector <256 x i32> %yins, <256 x i32> undef, <256 x i32> zeroinitializer
+  %z = and <256 x i32> %x, %vy
+  ret <256 x i32> %z
+}
+
+
+
+; <256 x i64>
+
+; Function Attrs: nounwind
+define fastcc <256 x i64> @and_vv_v256i64(<256 x i64> %x, <256 x i64> %y) {
+; CHECK-LABEL: and_vv_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vand %v0, %v0, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = and <256 x i64> %x, %y
+  ret <256 x i64> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i64> @and_sv_v256i64(i64 %x, <256 x i64> %y) {
+; CHECK-LABEL: and_sv_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vand %v0, %s0, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x i64> undef, i64 %x, i32 0
+  %vx = shufflevector <256 x i64> %xins, <256 x i64> undef, <256 x i32> zeroinitializer
+  %z = and <256 x i64> %vx, %y
+  ret <256 x i64> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i64> @and_vs_v256i64(<256 x i64> %x, i64 %y) {
+; CHECK-LABEL: and_vs_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vand %v0, %s0, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x i64> undef, i64 %y, i32 0
+  %vy = shufflevector <256 x i64> %yins, <256 x i64> undef, <256 x i32> zeroinitializer
+  %z = and <256 x i64> %x, %vy
+  ret <256 x i64> %z
+}
+
+; <128 x i64>
+; We expect this to be widened.
+
+; Function Attrs: nounwind
+define fastcc <128 x i64> @and_vv_v128i64(<128 x i64> %x, <128 x i64> %y) {
+; CHECK-LABEL: and_vv_v128i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vand %v0, %v0, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = and <128 x i64> %x, %y
+  ret <128 x i64> %z
+}
+
+; <256 x i16>
+; We expect promotion.
+
+; Function Attrs: nounwind
+define fastcc <256 x i16> @and_vv_v256i16(<256 x i16> %x, <256 x i16> %y) {
+; CHECK-LABEL: and_vv_v256i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    pvand.lo %v0, %v0, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = and <256 x i16> %x, %y
+  ret <256 x i16> %z
+}
+
+; <128 x i16>
+; We expect this to be scalarized (for now).
+
+; Function Attrs: nounwind
+define fastcc <128 x i16> @and_vv_v128i16(<128 x i16> %x, <128 x i16> %y) {
+; CHECK-LABEL: and_vv_v128i16:
+; CHECK-NOT:       vand
+  %z = and <128 x i16> %x, %y
+  ret <128 x i16> %z
+}
+