[llvm] e291179 - [AArch64][GlobalISel] Selection support for v8s8, v4s16, v16s8 G_INSERT_VECTOR_ELT with GPR scalar

Vladislav Dzhidzhoev via llvm-commits llvm-commits at lists.llvm.org
Fri May 19 08:38:54 PDT 2023


Author: Vladislav Dzhidzhoev
Date: 2023-05-19T17:38:22+02:00
New Revision: e291179e2dde0846cfc0547948a2036e11ae02f1

URL: https://github.com/llvm/llvm-project/commit/e291179e2dde0846cfc0547948a2036e11ae02f1
DIFF: https://github.com/llvm/llvm-project/commit/e291179e2dde0846cfc0547948a2036e11ae02f1.diff

LOG: [AArch64][GlobalISel] Selection support for v8s8, v4s16, v16s8 G_INSERT_VECTOR_ELT with GPR scalar

This is needed to support some NEON intrinsics under GlobalISel.

Differential Revision: https://reviews.llvm.org/D146780
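
For context, here is a minimal C example (written for illustration; not part
of the patch) of the NEON lane-set intrinsics that produce G_INSERT_VECTOR_ELT
with a GPR scalar for the newly handled v8s8, v16s8, and v4s16 cases:

    #include <arm_neon.h>

    /* Each vset(q)_lane_* call becomes a G_INSERT_VECTOR_ELT whose scalar
       operand lives in a general-purpose register. */
    uint8x8_t set_b8(uint8x8_t v, uint8_t x) {
      return vset_lane_u8(x, v, 1);   /* <8 x s8> destination  */
    }
    uint8x16_t set_b16(uint8x16_t v, uint8_t x) {
      return vsetq_lane_u8(x, v, 1);  /* <16 x s8> destination */
    }
    uint16x4_t set_h4(uint16x4_t v, uint16_t x) {
      return vset_lane_u16(x, v, 1);  /* <4 x s16> destination */
    }

With this change, such inserts should select to the GPR forms of INS
(INSvi8gpr/INSvi16gpr) instead of triggering the GlobalISel fallback.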

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
    llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-insert-vector-elt.mir
    llvm/test/CodeGen/AArch64/arm64-sminv.ll
    llvm/test/CodeGen/AArch64/arm64-umaxv.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index 5273beedcb83a..b18e7f729aea1 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -4445,7 +4445,10 @@ static std::pair<unsigned, unsigned>
 getInsertVecEltOpInfo(const RegisterBank &RB, unsigned EltSize) {
   unsigned Opc, SubregIdx;
   if (RB.getID() == AArch64::GPRRegBankID) {
-    if (EltSize == 16) {
+    if (EltSize == 8) {
+      Opc = AArch64::INSvi8gpr;
+      SubregIdx = AArch64::bsub;
+    } else if (EltSize == 16) {
       Opc = AArch64::INSvi16gpr;
       SubregIdx = AArch64::ssub;
     } else if (EltSize == 32) {
@@ -5369,8 +5372,8 @@ bool AArch64InstructionSelector::selectInsertElt(MachineInstr &I,
   Register EltReg = I.getOperand(2).getReg();
   const LLT EltTy = MRI.getType(EltReg);
   unsigned EltSize = EltTy.getSizeInBits();
-  if (EltSize < 16 || EltSize > 64)
-    return false; // Don't support all element types yet.
+  if (EltSize < 8 || EltSize > 64)
+    return false;
 
   // Find the definition of the index. Bail out if it's not defined by a
   // G_CONSTANT.

diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index e43c1ef521779..202e1fb70cfb5 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -698,7 +698,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
       .clampMaxNumElements(1, p0, 2);
 
   getActionDefinitionsBuilder(G_INSERT_VECTOR_ELT)
-      .legalIf(typeInSet(0, {v8s16, v2s32, v4s32, v2s64}));
+      .legalIf(typeInSet(0, {v16s8, v8s8, v8s16, v4s16, v4s32, v2s32, v2s64}));
 
   getActionDefinitionsBuilder(G_BUILD_VECTOR)
       .legalFor({{v8s8, s8},

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir
index 328935b1a6ca1..1dae07bd07944 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir
@@ -1,6 +1,63 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -mtriple=aarch64-linux-gnu -O0 -run-pass=legalizer %s -o - -global-isel-abort=1 | FileCheck %s
 
+---
+name:            v8s8
+body: |
+  bb.0:
+    liveins: $q0
+    ; CHECK-LABEL: name: v8s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<8 x s8>) = COPY $d0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK: %val:_(s8) = G_CONSTANT i8 42
+    ; CHECK: [[IVEC:%[0-9]+]]:_(<8 x s8>) = G_INSERT_VECTOR_ELT [[COPY]], %val(s8), [[C]](s32)
+    ; CHECK: $d0 = COPY [[IVEC]](<8 x s8>)
+    ; CHECK: RET_ReallyLR
+    %0:_(<8 x s8>) = COPY $d0
+    %1:_(s32) = G_CONSTANT i32 1
+    %val:_(s8) = G_CONSTANT i8 42
+    %2:_(<8 x s8>) = G_INSERT_VECTOR_ELT %0(<8 x s8>), %val(s8), %1(s32)
+    $d0 = COPY %2(<8 x s8>)
+    RET_ReallyLR
+...
+---
+name:            v16s8
+body: |
+  bb.0:
+    liveins: $q0
+    ; CHECK-LABEL: name: v16s8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK: %val:_(s8) = G_CONSTANT i8 42
+    ; CHECK: [[IVEC:%[0-9]+]]:_(<16 x s8>) = G_INSERT_VECTOR_ELT [[COPY]], %val(s8), [[C]](s32)
+    ; CHECK: $q0 = COPY [[IVEC]](<16 x s8>)
+    ; CHECK: RET_ReallyLR
+    %0:_(<16 x s8>) = COPY $q0
+    %1:_(s32) = G_CONSTANT i32 1
+    %val:_(s8) = G_CONSTANT i8 42
+    %2:_(<16 x s8>) = G_INSERT_VECTOR_ELT %0(<16 x s8>), %val(s8), %1(s32)
+    $q0 = COPY %2(<16 x s8>)
+    RET_ReallyLR
+...
+---
+name:            v4s16
+body: |
+  bb.0:
+    liveins: $q0
+    ; CHECK-LABEL: name: v4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK: %val:_(s16) = G_CONSTANT i16 42
+    ; CHECK: [[IVEC:%[0-9]+]]:_(<4 x s16>) = G_INSERT_VECTOR_ELT [[COPY]], %val(s16), [[C]](s32)
+    ; CHECK: $d0 = COPY [[IVEC]](<4 x s16>)
+    ; CHECK: RET_ReallyLR
+    %0:_(<4 x s16>) = COPY $d0
+    %1:_(s32) = G_CONSTANT i32 1
+    %val:_(s16) = G_CONSTANT i16 42
+    %2:_(<4 x s16>) = G_INSERT_VECTOR_ELT %0(<4 x s16>), %val(s16), %1(s32)
+    $d0 = COPY %2(<4 x s16>)
+    RET_ReallyLR
+...
 ---
 name:            v8s16
 body: |

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-insert-vector-elt.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-insert-vector-elt.mir
index a311e005a5741..13fd146f8744c 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-insert-vector-elt.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-insert-vector-elt.mir
@@ -1,6 +1,61 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -verify-machineinstrs -mtriple aarch64-unknown-unknown -run-pass=instruction-select %s -o - | FileCheck %s
 ---
+name:            v16s8_gpr
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $q1, $w0
+
+    ; CHECK-LABEL: name: v16s8_gpr
+    ; CHECK: liveins: $q1, $w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK: [[INSvi8gpr:%[0-9]+]]:fpr128 = INSvi8gpr [[COPY1]], 1, [[COPY]]
+    ; CHECK: $q0 = COPY [[INSvi8gpr]]
+    ; CHECK: RET_ReallyLR implicit $q0
+    %0:gpr(s32) = COPY $w0
+    %trunc:gpr(s8) = G_TRUNC %0
+    %1:fpr(<16 x s8>) = COPY $q1
+    %3:gpr(s32) = G_CONSTANT i32 1
+    %2:fpr(<16 x s8>) = G_INSERT_VECTOR_ELT %1, %trunc:gpr(s8), %3:gpr(s32)
+    $q0 = COPY %2(<16 x s8>)
+    RET_ReallyLR implicit $q0
+
+...
+---
+name:            v8s8_gpr
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $d0, $w0
+
+    ; CHECK-LABEL: name: v8s8_gpr
+    ; CHECK: liveins: $d0, $w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY1]], %subreg.dsub
+    ; CHECK: [[INSvi8gpr:%[0-9]+]]:fpr128 = INSvi8gpr [[INSERT_SUBREG]], 1, [[COPY]]
+    ; CHECK: [[COPY2:%[0-9]+]]:fpr64 = COPY [[INSvi8gpr]].dsub
+    ; CHECK: $d0 = COPY [[COPY2]]
+    ; CHECK: RET_ReallyLR implicit $d0
+    %0:gpr(s32) = COPY $w0
+    %trunc:gpr(s8) = G_TRUNC %0
+    %1:fpr(<8 x s8>) = COPY $d0
+    %3:gpr(s32) = G_CONSTANT i32 1
+    %2:fpr(<8 x s8>) = G_INSERT_VECTOR_ELT %1, %trunc(s8), %3(s32)
+    $d0 = COPY %2(<8 x s8>)
+    RET_ReallyLR implicit $d0
+
+...
+---
 name:            v8s16_gpr
 alignment:       4
 legalized:       true
@@ -104,6 +159,35 @@ body:             |
     $q0 = COPY %2(<4 x s32>)
     RET_ReallyLR implicit $q0
 
+...
+---
+name:            v4s16_gpr
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $d0, $w0
+
+    ; CHECK-LABEL: name: v4s16_gpr
+    ; CHECK: liveins: $d0, $w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY1]], %subreg.dsub
+    ; CHECK: [[INSvi16gpr:%[0-9]+]]:fpr128 = INSvi16gpr [[INSERT_SUBREG]], 1, [[COPY]]
+    ; CHECK: [[COPY2:%[0-9]+]]:fpr64 = COPY [[INSvi16gpr]].dsub
+    ; CHECK: $d0 = COPY [[COPY2]]
+    ; CHECK: RET_ReallyLR implicit $d0
+    %0:gpr(s32) = COPY $w0
+    %trunc:gpr(s16) = G_TRUNC %0
+    %1:fpr(<4 x s16>) = COPY $d0
+    %3:gpr(s32) = G_CONSTANT i32 1
+    %2:fpr(<4 x s16>) = G_INSERT_VECTOR_ELT %1, %trunc(s16), %3(s32)
+    $d0 = COPY %2(<4 x s16>)
+    RET_ReallyLR implicit $d0
+
 ...
 ---
 name:            v2s64_fpr

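As the v8s8_gpr and v4s16_gpr checks above show, the INS*gpr instructions only
operate on the full 128-bit register, so for a 64-bit destination the selector
first widens the D register into an undef Q register (IMPLICIT_DEF +
INSERT_SUBREG %subreg.dsub), performs the lane insert, and copies the result
back out through dsub. A hypothetical C snippet (illustrative only, not from
the patch) that exercises this 64-bit path without intrinsics, using the
GCC/Clang vector extension:

    #include <stdint.h>

    typedef uint8_t v8u8 __attribute__((vector_size(8)));

    /* The subscripted store becomes G_INSERT_VECTOR_ELT on <8 x s8>;
       selection widens v to 128 bits, uses INSvi8gpr, then narrows. */
    v8u8 set_lane1(v8u8 v, uint8_t x) {
      v[1] = x;
      return v;
    }
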
diff --git a/llvm/test/CodeGen/AArch64/arm64-sminv.ll b/llvm/test/CodeGen/AArch64/arm64-sminv.ll
index 15f874e4b4393..3f2296d726a96 100644
--- a/llvm/test/CodeGen/AArch64/arm64-sminv.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-sminv.ll
@@ -1,10 +1,14 @@
-; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s --check-prefix=CHECK --check-prefix=SDAG
+; RUN: llc < %s -global-isel=1 -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s --check-prefix=CHECK --check-prefix=GISEL
 
 define signext i8 @test_vminv_s8(<8 x i8> %a1) {
 ; CHECK: test_vminv_s8
 ; CHECK: sminv.8b b[[REGNUM:[0-9]+]], v0
-; CHECK-NEXT: smov.b w0, v[[REGNUM]][0]
-; CHECK-NEXT: ret
+; SDAG-NEXT: smov.b w0, v[[REGNUM]][0]
+; SDAG-NEXT: ret
+; GISEL-NEXT: smov.b w8, v[[REGNUM]][0]
+; GISEL-NEXT: sxtb  w0, w8
+; GISEL-NEXT: ret
 entry:
   %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8> %a1)
   %0 = trunc i32 %vminv.i to i8
@@ -14,8 +18,11 @@ entry:
 define signext i16 @test_vminv_s16(<4 x i16> %a1) {
 ; CHECK: test_vminv_s16
 ; CHECK: sminv.4h h[[REGNUM:[0-9]+]], v0
-; CHECK-NEXT: smov.h w0, v[[REGNUM]][0]
-; CHECK-NEXT: ret
+; SDAG-NEXT: smov.h w0, v[[REGNUM]][0]
+; SDAG-NEXT: ret
+; GISEL-NEXT: smov.h w8, v[[REGNUM]][0]
+; GISEL-NEXT: sxth  w0, w8
+; GISEL-NEXT: ret
 entry:
   %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16> %a1)
   %0 = trunc i32 %vminv.i to i16
@@ -36,8 +43,11 @@ entry:
 define signext i8 @test_vminvq_s8(<16 x i8> %a1) {
 ; CHECK: test_vminvq_s8
 ; CHECK: sminv.16b b[[REGNUM:[0-9]+]], v0
-; CHECK-NEXT: smov.b w0, v[[REGNUM]][0]
-; CHECK-NEXT: ret
+; SDAG-NEXT: smov.b w0, v[[REGNUM]][0]
+; SDAG-NEXT: ret
+; GISEL-NEXT: smov.b w8, v[[REGNUM]][0]
+; GISEL-NEXT: sxtb  w0, w8
+; GISEL-NEXT: ret
 entry:
   %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8> %a1)
   %0 = trunc i32 %vminv.i to i8
@@ -47,8 +57,11 @@ entry:
 define signext i16 @test_vminvq_s16(<8 x i16> %a1) {
 ; CHECK: test_vminvq_s16
 ; CHECK: sminv.8h h[[REGNUM:[0-9]+]], v0
-; CHECK-NEXT: smov.h w0, v[[REGNUM]][0]
-; CHECK-NEXT: ret
+; SDAG-NEXT: smov.h w0, v[[REGNUM]][0]
+; SDAG-NEXT: ret
+; GISEL-NEXT: smov.h w8, v[[REGNUM]][0]
+; GISEL-NEXT: sxth  w0, w8
+; GISEL-NEXT: ret
 entry:
   %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16> %a1)
   %0 = trunc i32 %vminv.i to i16
@@ -68,8 +81,11 @@ entry:
 define <8 x i8> @test_vminv_s8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) {
 ; CHECK-LABEL: test_vminv_s8_used_by_laneop:
 ; CHECK: sminv.8b b[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: mov.b v0[3], v[[REGNUM]][0]
-; CHECK-NEXT: ret
+; SDAG-NEXT: mov.b v0[3], v[[REGNUM]][0]
+; SDAG-NEXT: ret
+; GISEL-NEXT: smov.b  w8, v[[REGNUM]][0]
+; GISEL-NEXT: mov.b v0[3], w8
+; GISEL-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8> %a2)
   %1 = trunc i32 %0 to i8
@@ -80,8 +96,11 @@ entry:
 define <4 x i16> @test_vminv_s16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) {
 ; CHECK-LABEL: test_vminv_s16_used_by_laneop:
 ; CHECK: sminv.4h h[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: mov.h v0[3], v[[REGNUM]][0]
-; CHECK-NEXT: ret
+; SDAG-NEXT: mov.h v0[3], v[[REGNUM]][0]
+; SDAG-NEXT: ret
+; GISEL-NEXT: smov.h  w8, v[[REGNUM]][0]
+; GISEL-NEXT: mov.h v0[3], w8
+; GISEL-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16> %a2)
   %1 = trunc i32 %0 to i16
@@ -103,8 +122,11 @@ entry:
 define <16 x i8> @test_vminvq_s8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) {
 ; CHECK-LABEL: test_vminvq_s8_used_by_laneop:
 ; CHECK: sminv.16b b[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: mov.b v0[3], v[[REGNUM]][0]
-; CHECK-NEXT: ret
+; SDAG-NEXT: mov.b v0[3], v[[REGNUM]][0]
+; SDAG-NEXT: ret
+; GISEL-NEXT: smov.b  w8, v[[REGNUM]][0]
+; GISEL-NEXT: mov.b v0[3], w8
+; GISEL-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8> %a2)
   %1 = trunc i32 %0 to i8
@@ -115,8 +137,11 @@ entry:
 define <8 x i16> @test_vminvq_s16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) {
 ; CHECK-LABEL: test_vminvq_s16_used_by_laneop:
 ; CHECK: sminv.8h h[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: mov.h v0[3], v[[REGNUM]][0]
-; CHECK-NEXT: ret
+; SDAG-NEXT: mov.h v0[3], v[[REGNUM]][0]
+; SDAG-NEXT: ret
+; GISEL-NEXT: smov.h  w8, v[[REGNUM]][0]
+; GISEL-NEXT: mov.h v0[3], w8
+; GISEL-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16> %a2)
   %1 = trunc i32 %0 to i16

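The GISEL lines above differ from SDAG because GlobalISel moves the reduction
result into a W register first: the signext returns need an explicit sxtb/sxth
after the smov, and the lane insert uses the GPR form of INS (mov.b v0[3], w8)
that this patch makes selectable, whereas SelectionDAG keeps the value in
vector registers (mov.b v0[3], v1[0]). A C equivalent of the laneop pattern
(for illustration; the tests call the llvm.aarch64.neon.sminv intrinsic
directly):

    #include <arm_neon.h>

    /* Mirrors test_vminv_s8_used_by_laneop: reduce, then insert the
       scalar result into lane 3 of the accumulator vector. */
    int8x8_t min_into_lane3(int8x8_t acc, int8x8_t v) {
      return vset_lane_s8(vminv_s8(v), acc, 3);
    }
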
diff --git a/llvm/test/CodeGen/AArch64/arm64-umaxv.ll b/llvm/test/CodeGen/AArch64/arm64-umaxv.ll
index ec9a1f0aae950..505dd1668104f 100644
--- a/llvm/test/CodeGen/AArch64/arm64-umaxv.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-umaxv.ll
@@ -1,11 +1,13 @@
-; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s --check-prefix CHECK --check-prefix SDAG
+; RUN: llc < %s -global-isel=1 -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s --check-prefix CHECK --check-prefix GISEL
 
 define i32 @vmax_u8x8(<8 x i8> %a) nounwind ssp {
 ; CHECK-LABEL: vmax_u8x8:
 ; CHECK: umaxv.8b        b[[REG:[0-9]+]], v0
 ; CHECK: fmov    [[REG2:w[0-9]+]], s[[REG]]
 ; CHECK-NOT: and
-; CHECK: cbz     [[REG2]],
+; SDAG: cbz     [[REG2]],
+; GISEL: b
 entry:
   %vmaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8> %a) nounwind
   %tmp = trunc i32 %vmaxv.i to i8
@@ -28,7 +30,8 @@ define i32 @vmax_u4x16(<4 x i16> %a) nounwind ssp {
 ; CHECK: umaxv.4h        h[[REG:[0-9]+]], v0
 ; CHECK: fmov    [[REG2:w[0-9]+]], s[[REG]]
 ; CHECK-NOT: and
-; CHECK: cbz     [[REG2]],
+; SDAG: cbz     [[REG2]],
+; GISEL: b
 entry:
   %vmaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16> %a) nounwind
   %tmp = trunc i32 %vmaxv.i to i16
@@ -49,7 +52,8 @@ define i32 @vmax_u8x16(<8 x i16> %a) nounwind ssp {
 ; CHECK: umaxv.8h        h[[REG:[0-9]+]], v0
 ; CHECK: fmov    [[REG2:w[0-9]+]], s[[REG]]
 ; CHECK-NOT: and
-; CHECK: cbz     [[REG2]],
+; SDAG: cbz     [[REG2]],
+; GISEL: b
 entry:
   %vmaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16> %a) nounwind
   %tmp = trunc i32 %vmaxv.i to i16
@@ -70,7 +74,8 @@ define i32 @vmax_u16x8(<16 x i8> %a) nounwind ssp {
 ; CHECK: umaxv.16b        b[[REG:[0-9]+]], v0
 ; CHECK: fmov     [[REG2:w[0-9]+]], s[[REG]]
 ; CHECK-NOT: and
-; CHECK: cbz     [[REG2]],
+; SDAG: cbz     [[REG2]],
+; GISEL: b
 entry:
   %vmaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8> %a) nounwind
   %tmp = trunc i32 %vmaxv.i to i8
