[llvm] r252364 - [AArch64][FastISel] Don't even try to select vector icmps.

Ahmed Bougacha via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 6 15:16:54 PST 2015


Author: ab
Date: Fri Nov  6 17:16:53 2015
New Revision: 252364

URL: http://llvm.org/viewvc/llvm-project?rev=252364&view=rev
Log:
[AArch64][FastISel] Don't even try to select vector icmps.

We used to try to constant-fold them to i32 immediates.
Given that fast-isel doesn't otherwise support vNi1, we'd fall back
to SDAG anyway when selecting the users of the result.
However, if those users were in another block, we'd insert broken
cross-class copies (GPR32 to FPR64).

Give up instead, and let SDAG agree with itself on a vNi1
legalization strategy.
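For illustration, this is the problematic shape (a minimal sketch of the
pattern the new test below exercises; the function name is only for this
example): the compare sits in one block, but its only vNi1 user lives in a
successor, so the value has to cross a block boundary.

    define <2 x i32> @cross_block_icmp(<2 x i32> %a) {
      ; fast-isel used to fold/select the compare here...
      %c = icmp eq <2 x i32> %a, zeroinitializer
      br label %bb2
    bb2:
      ; ...while the vNi1 user in this block was left to the SDAG fallback,
      ; ending up with the broken GPR32-to-FPR64 cross-class copy
      ; described above.
      %z = zext <2 x i1> %c to <2 x i32>
      ret <2 x i32> %z
    }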

Added:
    llvm/trunk/test/CodeGen/AArch64/fast-isel-cmp-vec.ll
Modified:
    llvm/trunk/lib/Target/AArch64/AArch64FastISel.cpp

Modified: llvm/trunk/lib/Target/AArch64/AArch64FastISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64FastISel.cpp?rev=252364&r1=252363&r2=252364&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64FastISel.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64FastISel.cpp Fri Nov  6 17:16:53 2015
@@ -2451,6 +2451,10 @@ bool AArch64FastISel::selectIndirectBr(c
 bool AArch64FastISel::selectCmp(const Instruction *I) {
   const CmpInst *CI = cast<CmpInst>(I);
 
+  // Vectors of i1 are weird: bail out.
+  if (CI->getType()->isVectorTy())
+    return false;
+
   // Try to optimize or fold the cmp.
   CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
   unsigned ResultReg = 0;

Added: llvm/trunk/test/CodeGen/AArch64/fast-isel-cmp-vec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fast-isel-cmp-vec.ll?rev=252364&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fast-isel-cmp-vec.ll (added)
+++ llvm/trunk/test/CodeGen/AArch64/fast-isel-cmp-vec.ll Fri Nov  6 17:16:53 2015
@@ -0,0 +1,100 @@
+; RUN: llc -mtriple=aarch64-apple-darwin -fast-isel -verify-machineinstrs \
+; RUN:   -aarch64-atomic-cfg-tidy=0 -disable-cgp -disable-branch-fold \
+; RUN:   < %s | FileCheck %s
+
+;
+; Verify that we don't mess up vector comparisons in fast-isel.
+;
+
+define <2 x i32> @icmp_v2i32(<2 x i32> %a) {
+; CHECK-LABEL: icmp_v2i32:
+; CHECK:      ; BB#0:
+; CHECK-NEXT:  cmeq.2s [[CMP:v[0-9]+]], v0, #0
+; CHECK-NEXT: ; BB#1:
+; CHECK-NEXT:  movi.2s [[MASK:v[0-9]+]], #0x1
+; CHECK-NEXT:  and.8b v0, [[CMP]], [[MASK]]
+; CHECK-NEXT:  ret
+  %c = icmp eq <2 x i32> %a, zeroinitializer
+  br label %bb2
+bb2:
+  %z = zext <2 x i1> %c to <2 x i32>
+  ret <2 x i32> %z
+}
+
+define <2 x i32> @icmp_constfold_v2i32(<2 x i32> %a) {
+; CHECK-LABEL: icmp_constfold_v2i32:
+; CHECK:      ; BB#0:
+; CHECK-NEXT:  movi d[[CMP:[0-9]+]], #0xffffffffffffffff
+; CHECK-NEXT: ; BB#1:
+; CHECK-NEXT:  movi.2s [[MASK:v[0-9]+]], #0x1
+; CHECK-NEXT:  and.8b v0, v[[CMP]], [[MASK]]
+; CHECK-NEXT:  ret
+  %1 = icmp eq <2 x i32> %a, %a
+  br label %bb2
+bb2:
+  %2 = zext <2 x i1> %1 to <2 x i32>
+  ret <2 x i32> %2
+}
+
+define <4 x i32> @icmp_v4i32(<4 x i32> %a) {
+; CHECK-LABEL: icmp_v4i32:
+; CHECK:      ; BB#0:
+; CHECK-NEXT:  cmeq.4s [[CMP:v[0-9]+]], v0, #0
+; CHECK-NEXT:  xtn.4h [[CMPV4I16:v[0-9]+]], [[CMP]]
+; CHECK-NEXT: ; BB#1:
+; CHECK-NEXT:  movi.4h [[MASK:v[0-9]+]], #0x1
+; CHECK-NEXT:  and.8b [[ZEXT:v[0-9]+]], [[CMPV4I16]], [[MASK]]
+; CHECK-NEXT:  ushll.4s v0, [[ZEXT]], #0
+; CHECK-NEXT:  ret
+  %c = icmp eq <4 x i32> %a, zeroinitializer
+  br label %bb2
+bb2:
+  %z = zext <4 x i1> %c to <4 x i32>
+  ret <4 x i32> %z
+}
+
+define <4 x i32> @icmp_constfold_v4i32(<4 x i32> %a) {
+; CHECK-LABEL: icmp_constfold_v4i32:
+; CHECK:      ; BB#0:
+; CHECK-NEXT:  movi d[[CMP:[0-9]+]], #0xffffffffffffffff
+; CHECK-NEXT: ; BB#1:
+; CHECK-NEXT:  movi.4h [[MASK:v[0-9]+]], #0x1
+; CHECK-NEXT:  and.8b [[ZEXT:v[0-9]+]], v[[CMP]], [[MASK]]
+; CHECK-NEXT:  ushll.4s v0, [[ZEXT]], #0
+; CHECK-NEXT:  ret
+  %1 = icmp eq <4 x i32> %a, %a
+  br label %bb2
+bb2:
+  %2 = zext <4 x i1> %1 to <4 x i32>
+  ret <4 x i32> %2
+}
+
+define <16 x i8> @icmp_v16i8(<16 x i8> %a) {
+; CHECK-LABEL: icmp_v16i8:
+; CHECK:      ; BB#0:
+; CHECK-NEXT:  cmeq.16b [[CMP:v[0-9]+]], v0, #0
+; CHECK-NEXT: ; BB#1:
+; CHECK-NEXT:  movi.16b [[MASK:v[0-9]+]], #0x1
+; CHECK-NEXT:  and.16b v0, [[CMP]], [[MASK]]
+; CHECK-NEXT:  ret
+  %c = icmp eq <16 x i8> %a, zeroinitializer
+  br label %bb2
+bb2:
+  %z = zext <16 x i1> %c to <16 x i8>
+  ret <16 x i8> %z
+}
+
+define <16 x i8> @icmp_constfold_v16i8(<16 x i8> %a) {
+; CHECK-LABEL: icmp_constfold_v16i8:
+; CHECK:      ; BB#0:
+; CHECK-NEXT:  movi.2d [[CMP:v[0-9]+]], #0xffffffffffffffff
+; CHECK-NEXT: ; BB#1:
+; CHECK-NEXT:  movi.16b [[MASK:v[0-9]+]], #0x1
+; CHECK-NEXT:  and.16b v0, [[CMP]], [[MASK]]
+; CHECK-NEXT:  ret
+  %1 = icmp eq <16 x i8> %a, %a
+  br label %bb2
+bb2:
+  %2 = zext <16 x i1> %1 to <16 x i8>
+  ret <16 x i8> %2
+}



