[llvm] 72305a0 - [llvm] [DAG] Fix bug in llvm.get.active.lane.mask lowering

via llvm-commits <llvm-commits at lists.llvm.org>
Wed Aug 26 10:16:48 PDT 2020


Author: aartbik
Date: 2020-08-26T10:16:31-07:00
New Revision: 72305a08ffcb2da10a33732adfaa8757ba70904f

URL: https://github.com/llvm/llvm-project/commit/72305a08ffcb2da10a33732adfaa8757ba70904f
DIFF: https://github.com/llvm/llvm-project/commit/72305a08ffcb2da10a33732adfaa8757ba70904f.diff

LOG: [llvm] [DAG] Fix bug in llvm.get.active.lane.mask lowering

Lowering of this intrinsic only accepted vector lengths that map onto
proper machine vector types; a length such as <7 x i1> has no simple
MVT and broke the lowering, which used MVT::getVT where EVT::getEVT
is needed. Fixed by this change. With unit tests.

https://bugs.llvm.org/show_bug.cgi?id=47299

Reviewed By: SjoerdMeijer

Differential Revision: https://reviews.llvm.org/D86585
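
For context, a minimal standalone sketch (not part of the patch, and
assuming only the public ValueTypes API) of why the MVT path fails for a
type like <7 x i1>: MVT::getVT can only name simple machine value types,
while EVT::getEVT falls back to an extended value type when no simple
one exists.

  // Hypothetical sketch, not part of this patch: contrast MVT::getVT
  // with EVT::getEVT on a vector type that has no simple machine
  // equivalent, such as <7 x i1>.
  #include "llvm/CodeGen/ValueTypes.h"
  #include "llvm/IR/DerivedTypes.h"
  #include "llvm/IR/LLVMContext.h"
  #include "llvm/Support/raw_ostream.h"

  using namespace llvm;

  int main() {
    LLVMContext Ctx;
    // <7 x i1> is a valid IR type with no simple MVT equivalent.
    auto *Ty = FixedVectorType::get(Type::getInt1Ty(Ctx), 7);

    // EVT::getEVT handles any first-class type: when no simple MVT
    // matches, it returns an extended value type instead.
    EVT VT = EVT::getEVT(Ty);
    outs() << "isSimple: " << (VT.isSimple() ? "true" : "false") << "\n";
    // prints "isSimple: false"

    // MVT::getVT(Ty), by contrast, can only name simple machine types;
    // for <7 x i1> there is none, which is what broke the old lowering
    // in visitIntrinsicCall.
    return 0;
  }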

Added: 
    llvm/test/CodeGen/X86/pr47299.ll

Modified: 
    llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 38d11e4cd059..70af1891cbf4 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -6901,13 +6901,13 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
     for (unsigned i = 0; i < VecWidth; i++) {
       OpsTripCount.push_back(TripCount);
       OpsIndex.push_back(Index);
-      OpsStepConstants.push_back(DAG.getConstant(i, DL, MVT::getVT(ElementTy)));
+      OpsStepConstants.push_back(
+          DAG.getConstant(i, DL, EVT::getEVT(ElementTy)));
     }
 
-    EVT CCVT = MVT::i1;
-    CCVT = EVT::getVectorVT(I.getContext(), CCVT, VecWidth);
+    EVT CCVT = EVT::getVectorVT(I.getContext(), MVT::i1, VecWidth);
 
-    auto VecTy = MVT::getVT(FixedVectorType::get(ElementTy, VecWidth));
+    auto VecTy = EVT::getEVT(FixedVectorType::get(ElementTy, VecWidth));
     SDValue VectorIndex = DAG.getBuildVector(VecTy, DL, OpsIndex);
     SDValue VectorStep = DAG.getBuildVector(VecTy, DL, OpsStepConstants);
     SDValue VectorInduction = DAG.getNode(

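The expansion above builds a vector of per-lane step constants and
compares against a splat of the trip count; per the LangRef definition,
lane i of llvm.get.active.lane.mask(%base, %n) is icmp ult (%base + i),
%n. A scalar model of that semantics (illustration only; activeLaneMask
is a hypothetical helper, and the overflow corner case of %base + i is
ignored):

  #include <cstdint>
  #include <vector>

  // Scalar model of llvm.get.active.lane.mask: lane i is set iff
  // (Base + i) < N under an unsigned comparison.
  std::vector<bool> activeLaneMask(uint64_t Base, uint64_t N,
                                   unsigned VecWidth) {
    std::vector<bool> Mask(VecWidth);
    for (unsigned i = 0; i < VecWidth; ++i)
      Mask[i] = Base + i < N; // icmp ult (%base + i), %n
    return Mask;
  }

  // e.g. activeLaneMask(0, 5, 7) yields {1, 1, 1, 1, 1, 0, 0},
  // matching the <7 x i1> case exercised by create_mask7 below.
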
diff --git a/llvm/test/CodeGen/X86/pr47299.ll b/llvm/test/CodeGen/X86/pr47299.ll
new file mode 100644
index 000000000000..2f5d07802c7c
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr47299.ll
@@ -0,0 +1,139 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O3 -x86-asm-syntax=intel -mtriple=x86_64-linux-generic -march=x86-64 -mcpu=skylake-avx512 < %s | FileCheck %s
+
+declare <7 x i1> @llvm.get.active.lane.mask.v7i1.i64(i64, i64)
+declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i64(i64, i64)
+declare <32 x i1> @llvm.get.active.lane.mask.v32i1.i64(i64, i64)
+declare <64 x i1> @llvm.get.active.lane.mask.v64i1.i64(i64, i64)
+declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32, i32)
+declare <64 x i1> @llvm.get.active.lane.mask.v64i1.i32(i32, i32)
+
+define <7 x i1> @create_mask7(i64 %0) {
+; CHECK-LABEL: create_mask7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    mov rax, rdi
+; CHECK-NEXT:    vpbroadcastq zmm0, rsi
+; CHECK-NEXT:    vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT:    kshiftrb k1, k0, 6
+; CHECK-NEXT:    kmovd r8d, k1
+; CHECK-NEXT:    kshiftrb k1, k0, 5
+; CHECK-NEXT:    kmovd r9d, k1
+; CHECK-NEXT:    kshiftrb k1, k0, 4
+; CHECK-NEXT:    kmovd r10d, k1
+; CHECK-NEXT:    kshiftrb k1, k0, 3
+; CHECK-NEXT:    kmovd edi, k1
+; CHECK-NEXT:    kshiftrb k1, k0, 2
+; CHECK-NEXT:    kmovd ecx, k1
+; CHECK-NEXT:    kshiftrb k1, k0, 1
+; CHECK-NEXT:    kmovd edx, k1
+; CHECK-NEXT:    kmovd esi, k0
+; CHECK-NEXT:    and sil, 1
+; CHECK-NEXT:    and dl, 1
+; CHECK-NEXT:    add dl, dl
+; CHECK-NEXT:    or dl, sil
+; CHECK-NEXT:    and cl, 1
+; CHECK-NEXT:    shl cl, 2
+; CHECK-NEXT:    or cl, dl
+; CHECK-NEXT:    and dil, 1
+; CHECK-NEXT:    shl dil, 3
+; CHECK-NEXT:    or dil, cl
+; CHECK-NEXT:    and r10b, 1
+; CHECK-NEXT:    shl r10b, 4
+; CHECK-NEXT:    or r10b, dil
+; CHECK-NEXT:    and r9b, 1
+; CHECK-NEXT:    shl r9b, 5
+; CHECK-NEXT:    or r9b, r10b
+; CHECK-NEXT:    shl r8b, 6
+; CHECK-NEXT:    or r8b, r9b
+; CHECK-NEXT:    and r8b, 127
+; CHECK-NEXT:    mov byte ptr [rax], r8b
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    ret
+  %2 = call <7 x i1> @llvm.get.active.lane.mask.v7i1.i64(i64 0, i64 %0)
+  ret <7 x i1> %2
+}
+
+define <16 x i1> @create_mask16(i64 %0) {
+; CHECK-LABEL: create_mask16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpbroadcastq zmm0, rdi
+; CHECK-NEXT:    vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT:    vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT:    kunpckbw k0, k1, k0
+; CHECK-NEXT:    vpmovm2b xmm0, k0
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    ret
+  %2 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i64(i64 0, i64 %0)
+  ret <16 x i1> %2
+}
+
+define <32 x i1> @create_mask32(i64 %0) {
+; CHECK-LABEL: create_mask32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpbroadcastq zmm0, rdi
+; CHECK-NEXT:    vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT:    vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT:    vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT:    kunpckbw k0, k1, k0
+; CHECK-NEXT:    vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT:    kunpckbw k1, k1, k2
+; CHECK-NEXT:    kunpckwd k0, k1, k0
+; CHECK-NEXT:    vpmovm2b ymm0, k0
+; CHECK-NEXT:    ret
+  %2 = call <32 x i1> @llvm.get.active.lane.mask.v32i1.i64(i64 0, i64 %0)
+  ret <32 x i1> %2
+}
+
+define <64 x i1> @create_mask64(i64 %0) {
+; CHECK-LABEL: create_mask64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpbroadcastq zmm0, rdi
+; CHECK-NEXT:    vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT:    vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT:    vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT:    kunpckbw k0, k1, k0
+; CHECK-NEXT:    vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT:    kunpckbw k1, k1, k2
+; CHECK-NEXT:    vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT:    kunpckwd k0, k1, k0
+; CHECK-NEXT:    vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT:    kunpckbw k1, k1, k2
+; CHECK-NEXT:    vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT:    vpcmpnleuq k3, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT:    kunpckbw k2, k3, k2
+; CHECK-NEXT:    kunpckwd k1, k2, k1
+; CHECK-NEXT:    kunpckdq k0, k1, k0
+; CHECK-NEXT:    vpmovm2b zmm0, k0
+; CHECK-NEXT:    ret
+  %2 = call <64 x i1> @llvm.get.active.lane.mask.v64i1.i64(i64 0, i64 %0)
+  ret <64 x i1> %2
+}
+
+define <16 x i1> @create_mask16_i32(i32 %0) {
+; CHECK-LABEL: create_mask16_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpbroadcastd zmm0, edi
+; CHECK-NEXT:    vpcmpnleud k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT:    vpmovm2b xmm0, k0
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    ret
+  %2 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 0, i32 %0)
+  ret <16 x i1> %2
+}
+
+define <64 x i1> @create_mask64_i32(i32 %0) {
+; CHECK-LABEL: create_mask64_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpbroadcastd zmm0, edi
+; CHECK-NEXT:    vpcmpnleud k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT:    vpcmpnleud k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT:    vpcmpnleud k2, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT:    kunpckwd k0, k1, k0
+; CHECK-NEXT:    vpcmpnleud k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT:    kunpckwd k1, k1, k2
+; CHECK-NEXT:    kunpckdq k0, k1, k0
+; CHECK-NEXT:    vpmovm2b zmm0, k0
+; CHECK-NEXT:    ret
+  %2 = call <64 x i1> @llvm.get.active.lane.mask.v64i1.i32(i32 0, i32 %0)
+  ret <64 x i1> %2
+}
