[llvm] 77ae9b3 - [AArch64] Extra negated shift tests. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Fri Jun 25 11:17:55 PDT 2021


Author: David Green
Date: 2021-06-25T19:17:31+01:00
New Revision: 77ae9b364a9d9b99501163761313cefbb345cea7

URL: https://github.com/llvm/llvm-project/commit/77ae9b364a9d9b99501163761313cefbb345cea7
DIFF: https://github.com/llvm/llvm-project/commit/77ae9b364a9d9b99501163761313cefbb345cea7.diff

LOG: [AArch64] Extra negated shift tests. NFC

Added: 
    llvm/test/CodeGen/AArch64/sub-splat-sub.ll

Modified: 
    llvm/test/CodeGen/AArch64/neon-shift-neg.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/neon-shift-neg.ll b/llvm/test/CodeGen/AArch64/neon-shift-neg.ll
index 00fdb022bb2c..ecfa4308ffc8 100644
--- a/llvm/test/CodeGen/AArch64/neon-shift-neg.ll
+++ b/llvm/test/CodeGen/AArch64/neon-shift-neg.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon,+sve | FileCheck %s
 
 define <2 x i64> @shr64x2(<2 x i64> %a, i64 %b) {
 ; CHECK-LABEL: shr64x2:
@@ -144,6 +144,149 @@ entry:
   ret <8 x i8> %shr
 }
 
+define <2 x i64> @lshr64x2(<2 x i64> %a, i64 %b) {
+; CHECK-LABEL: lshr64x2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    neg x8, x0
+; CHECK-NEXT:    dup v1.2d, x8
+; CHECK-NEXT:    neg v1.2d, v1.2d
+; CHECK-NEXT:    ushl v0.2d, v0.2d, v1.2d
+; CHECK-NEXT:    ret
+entry:
+  %sub = sub nsw i64 0, %b
+  %splat.splatinsert = insertelement <2 x i64> poison, i64 %sub, i32 0
+  %splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> poison, <2 x i32> zeroinitializer
+  %shr = lshr <2 x i64> %a, %splat.splat
+  ret <2 x i64> %shr
+}
+
+define <4 x i32> @lshr32x4(<4 x i32> %a, i32 %b) {
+; CHECK-LABEL: lshr32x4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    neg w8, w0
+; CHECK-NEXT:    dup v1.4s, w8
+; CHECK-NEXT:    neg v1.4s, v1.4s
+; CHECK-NEXT:    ushl v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    ret
+entry:
+  %sub = sub nsw i32 0, %b
+  %splat.splatinsert = insertelement <4 x i32> poison, i32 %sub, i32 0
+  %splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+  %shr = lshr <4 x i32> %a, %splat.splat
+  ret <4 x i32> %shr
+}
+
+define <4 x i32> @lshr32x4undef(<4 x i32> %a, i32 %b) {
+; CHECK-LABEL: lshr32x4undef:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    neg w8, w0
+; CHECK-NEXT:    dup v1.4s, w8
+; CHECK-NEXT:    neg v1.4s, v1.4s
+; CHECK-NEXT:    ushl v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    ret
+entry:
+  %sub = sub nsw i32 0, %b
+  %splat.splatinsert = insertelement <4 x i32> poison, i32 %sub, i32 0
+  %splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> poison, <4 x i32> <i32 undef, i32 0, i32 0, i32 0>
+  %shr = lshr <4 x i32> %a, %splat.splat
+  ret <4 x i32> %shr
+}
+
+define <8 x i16> @lshr16x8(<8 x i16> %a, i16 %b) {
+; CHECK-LABEL: lshr16x8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    neg w8, w0
+; CHECK-NEXT:    dup v1.8h, w8
+; CHECK-NEXT:    neg v1.8h, v1.8h
+; CHECK-NEXT:    ushl v0.8h, v0.8h, v1.8h
+; CHECK-NEXT:    ret
+entry:
+  %sub = sub i16 0, %b
+  %0 = insertelement <8 x i16> undef, i16 %sub, i32 0
+  %sh_prom = shufflevector <8 x i16> %0, <8 x i16> undef, <8 x i32> zeroinitializer
+  %shr = lshr <8 x i16> %a, %sh_prom
+  ret <8 x i16> %shr
+}
+
+define <16 x i8> @lshr8x16(<16 x i8> %a, i8 %b) {
+; CHECK-LABEL: lshr8x16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    neg w8, w0
+; CHECK-NEXT:    dup v1.16b, w8
+; CHECK-NEXT:    neg v1.16b, v1.16b
+; CHECK-NEXT:    ushl v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    ret
+entry:
+  %sub = sub i8 0, %b
+  %0 = insertelement <16 x i8> undef, i8 %sub, i32 0
+  %sh_prom = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> zeroinitializer
+  %shr = lshr <16 x i8> %a, %sh_prom
+  ret <16 x i8> %shr
+}
+
+define <1 x i64> @lshr64x1(<1 x i64> %a, i64 %b) {
+; CHECK-LABEL: lshr64x1:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    neg x8, x0
+; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    neg d1, d1
+; CHECK-NEXT:    ushl d0, d0, d1
+; CHECK-NEXT:    ret
+entry:
+  %sub = sub nsw i64 0, %b
+  %splat.splatinsert = insertelement <1 x i64> poison, i64 %sub, i32 0
+  %shr = lshr <1 x i64> %a, %splat.splatinsert
+  ret <1 x i64> %shr
+}
+
+define <2 x i32> @lshr32x2(<2 x i32> %a, i32 %b) {
+; CHECK-LABEL: lshr32x2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    neg w8, w0
+; CHECK-NEXT:    dup v1.2s, w8
+; CHECK-NEXT:    neg v1.2s, v1.2s
+; CHECK-NEXT:    ushl v0.2s, v0.2s, v1.2s
+; CHECK-NEXT:    ret
+entry:
+  %sub = sub nsw i32 0, %b
+  %splat.splatinsert = insertelement <2 x i32> poison, i32 %sub, i32 0
+  %splat.splat = shufflevector <2 x i32> %splat.splatinsert, <2 x i32> poison, <2 x i32> zeroinitializer
+  %shr = lshr <2 x i32> %a, %splat.splat
+  ret <2 x i32> %shr
+}
+
+define <4 x i16> @lshr16x4(<4 x i16> %a, i16 %b) {
+; CHECK-LABEL: lshr16x4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    neg w8, w0
+; CHECK-NEXT:    dup v1.4h, w8
+; CHECK-NEXT:    neg v1.4h, v1.4h
+; CHECK-NEXT:    ushl v0.4h, v0.4h, v1.4h
+; CHECK-NEXT:    ret
+entry:
+  %sub = sub i16 0, %b
+  %0 = insertelement <4 x i16> undef, i16 %sub, i32 0
+  %sh_prom = shufflevector <4 x i16> %0, <4 x i16> undef, <4 x i32> zeroinitializer
+  %shr = lshr <4 x i16> %a, %sh_prom
+  ret <4 x i16> %shr
+}
+
+define <8 x i8> @lshr8x8(<8 x i8> %a, i8 %b) {
+; CHECK-LABEL: lshr8x8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    neg w8, w0
+; CHECK-NEXT:    dup v1.8b, w8
+; CHECK-NEXT:    neg v1.8b, v1.8b
+; CHECK-NEXT:    ushl v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    ret
+entry:
+  %sub = sub i8 0, %b
+  %0 = insertelement <8 x i8> undef, i8 %sub, i32 0
+  %sh_prom = shufflevector <8 x i8> %0, <8 x i8> undef, <8 x i32> zeroinitializer
+  %shr = lshr <8 x i8> %a, %sh_prom
+  ret <8 x i8> %shr
+}
+
 define <2 x i64> @shl64x2(<2 x i64> %a, i64 %b) {
 ; CHECK-LABEL: shl64x2:
 ; CHECK:       // %bb.0: // %entry
@@ -262,3 +405,212 @@ entry:
   %shl = shl <8 x i8> %a, %sh_prom
   ret <8 x i8> %shl
 }
+
+
+
+define <vscale x 2 x i64> @shrn64x2(<vscale x 2 x i64> %a, i64 %b) {
+; CHECK-LABEL: shrn64x2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    neg x8, x0
+; CHECK-NEXT:    mov z1.d, x8
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    asr z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    ret
+entry:
+  %sub = sub nsw i64 0, %b
+  %splat.splatinsert = insertelement <vscale x 2 x i64> poison, i64 %sub, i32 0
+  %splat.splat = shufflevector <vscale x 2 x i64> %splat.splatinsert, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+  %shr = ashr <vscale x 2 x i64> %a, %splat.splat
+  ret <vscale x 2 x i64> %shr
+}
+
+define <vscale x 4 x i32> @shrn32x4(<vscale x 4 x i32> %a, i32 %b) {
+; CHECK-LABEL: shrn32x4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    neg w8, w0
+; CHECK-NEXT:    mov z1.s, w8
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    asr z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    ret
+entry:
+  %sub = sub nsw i32 0, %b
+  %splat.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %sub, i32 0
+  %splat.splat = shufflevector <vscale x 4 x i32> %splat.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %shr = ashr <vscale x 4 x i32> %a, %splat.splat
+  ret <vscale x 4 x i32> %shr
+}
+
+define <vscale x 8 x i16> @shrn16x8(<vscale x 8 x i16> %a, i16 %b) {
+; CHECK-LABEL: shrn16x8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    neg w8, w0
+; CHECK-NEXT:    mov z1.h, w8
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    asr z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+entry:
+  %sub = sub i16 0, %b
+  %0 = insertelement <vscale x 8 x i16> undef, i16 %sub, i32 0
+  %sh_prom = shufflevector <vscale x 8 x i16> %0, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %shr = ashr <vscale x 8 x i16> %a, %sh_prom
+  ret <vscale x 8 x i16> %shr
+}
+
+define <vscale x 16 x i8> @shrn8x16(<vscale x 16 x i8> %a, i8 %b) {
+; CHECK-LABEL: shrn8x16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    neg w8, w0
+; CHECK-NEXT:    mov z1.b, w8
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    asr z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT:    ret
+entry:
+  %sub = sub i8 0, %b
+  %0 = insertelement <vscale x 16 x i8> undef, i8 %sub, i32 0
+  %sh_prom = shufflevector <vscale x 16 x i8> %0, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %shr = ashr <vscale x 16 x i8> %a, %sh_prom
+  ret <vscale x 16 x i8> %shr
+}
+
+define <vscale x 2 x i64> @lshrn64x2(<vscale x 2 x i64> %a, i64 %b) {
+; CHECK-LABEL: lshrn64x2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    neg x8, x0
+; CHECK-NEXT:    mov z1.d, x8
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    lsr z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    ret
+entry:
+  %sub = sub nsw i64 0, %b
+  %splat.splatinsert = insertelement <vscale x 2 x i64> poison, i64 %sub, i32 0
+  %splat.splat = shufflevector <vscale x 2 x i64> %splat.splatinsert, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+  %shr = lshr <vscale x 2 x i64> %a, %splat.splat
+  ret <vscale x 2 x i64> %shr
+}
+
+define <vscale x 4 x i32> @lshrn32x4(<vscale x 4 x i32> %a, i32 %b) {
+; CHECK-LABEL: lshrn32x4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    neg w8, w0
+; CHECK-NEXT:    mov z1.s, w8
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    lsr z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    ret
+entry:
+  %sub = sub nsw i32 0, %b
+  %splat.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %sub, i32 0
+  %splat.splat = shufflevector <vscale x 4 x i32> %splat.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %shr = lshr <vscale x 4 x i32> %a, %splat.splat
+  ret <vscale x 4 x i32> %shr
+}
+
+define <vscale x 8 x i16> @lshrn16x8(<vscale x 8 x i16> %a, i16 %b) {
+; CHECK-LABEL: lshrn16x8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    neg w8, w0
+; CHECK-NEXT:    mov z1.h, w8
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    lsr z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+entry:
+  %sub = sub i16 0, %b
+  %0 = insertelement <vscale x 8 x i16> undef, i16 %sub, i32 0
+  %sh_prom = shufflevector <vscale x 8 x i16> %0, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %shr = lshr <vscale x 8 x i16> %a, %sh_prom
+  ret <vscale x 8 x i16> %shr
+}
+
+define <vscale x 16 x i8> @lshrn8x16(<vscale x 16 x i8> %a, i8 %b) {
+; CHECK-LABEL: lshrn8x16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    neg w8, w0
+; CHECK-NEXT:    mov z1.b, w8
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    lsr z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT:    ret
+entry:
+  %sub = sub i8 0, %b
+  %0 = insertelement <vscale x 16 x i8> undef, i8 %sub, i32 0
+  %sh_prom = shufflevector <vscale x 16 x i8> %0, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %shr = lshr <vscale x 16 x i8> %a, %sh_prom
+  ret <vscale x 16 x i8> %shr
+}
+
+define <vscale x 2 x i64> @shln64x2(<vscale x 2 x i64> %a, i64 %b) {
+; CHECK-LABEL: shln64x2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    neg x8, x0
+; CHECK-NEXT:    mov z1.d, x8
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    lsl z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    ret
+entry:
+  %sub = sub nsw i64 0, %b
+  %splat.splatinsert = insertelement <vscale x 2 x i64> poison, i64 %sub, i32 0
+  %splat.splat = shufflevector <vscale x 2 x i64> %splat.splatinsert, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+  %shl = shl <vscale x 2 x i64> %a, %splat.splat
+  ret <vscale x 2 x i64> %shl
+}
+
+define <vscale x 4 x i32> @shln32x4(<vscale x 4 x i32> %a, i32 %b) {
+; CHECK-LABEL: shln32x4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    neg w8, w0
+; CHECK-NEXT:    mov z1.s, w8
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    ret
+entry:
+  %sub = sub nsw i32 0, %b
+  %splat.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %sub, i32 0
+  %splat.splat = shufflevector <vscale x 4 x i32> %splat.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %shl = shl <vscale x 4 x i32> %a, %splat.splat
+  ret <vscale x 4 x i32> %shl
+}
+
+define <vscale x 8 x i16> @shln16x8(<vscale x 8 x i16> %a, i16 %b) {
+; CHECK-LABEL: shln16x8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    neg w8, w0
+; CHECK-NEXT:    mov z1.h, w8
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+entry:
+  %sub = sub i16 0, %b
+  %0 = insertelement <vscale x 8 x i16> undef, i16 %sub, i32 0
+  %sh_prom = shufflevector <vscale x 8 x i16> %0, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %shl = shl <vscale x 8 x i16> %a, %sh_prom
+  ret <vscale x 8 x i16> %shl
+}
+
+define <vscale x 16 x i8> @shln8x16(<vscale x 16 x i8> %a, i8 %b) {
+; CHECK-LABEL: shln8x16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    neg w8, w0
+; CHECK-NEXT:    mov z1.b, w8
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    lsl z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT:    ret
+entry:
+  %sub = sub i8 0, %b
+  %0 = insertelement <vscale x 16 x i8> undef, i8 %sub, i32 0
+  %sh_prom = shufflevector <vscale x 16 x i8> %0, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %shl = shl <vscale x 16 x i8> %a, %sh_prom
+  ret <vscale x 16 x i8> %shl
+}
+
+define <vscale x 16 x i8> @subsub(<vscale x 16 x i8> %a, i8 %b) {
+; CHECK-LABEL: subsub:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    neg w8, w0
+; CHECK-NEXT:    mov z0.b, w8
+; CHECK-NEXT:    subr z0.b, z0.b, #0 // =0x0
+; CHECK-NEXT:    ret
+entry:
+  %sub = sub i8 0, %b
+  %0 = insertelement <vscale x 16 x i8> undef, i8 %sub, i32 0
+  %sh_prom = shufflevector <vscale x 16 x i8> %0, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %sub2 = sub <vscale x 16 x i8> zeroinitializer, %sh_prom
+  ret <vscale x 16 x i8> %sub2
+}

diff --git a/llvm/test/CodeGen/AArch64/sub-splat-sub.ll b/llvm/test/CodeGen/AArch64/sub-splat-sub.ll
new file mode 100644
index 000000000000..8a5c6166a3f8
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sub-splat-sub.ll
@@ -0,0 +1,32 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon,+sve | FileCheck %s
+
+define <16 x i8> @subsubii8(<16 x i8> %a, i8 %b) {
+; CHECK-LABEL: subsubii8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    neg w8, w0
+; CHECK-NEXT:    dup v0.16b, w8
+; CHECK-NEXT:    neg v0.16b, v0.16b
+; CHECK-NEXT:    ret
+entry:
+  %sub = sub i8 0, %b
+  %0 = insertelement <16 x i8> undef, i8 %sub, i32 0
+  %sh_prom = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> zeroinitializer
+  %sub2 = sub <16 x i8> zeroinitializer, %sh_prom
+  ret <16 x i8> %sub2
+}
+
+define <vscale x 16 x i8> @subsubni8(<vscale x 16 x i8> %a, i8 %b) {
+; CHECK-LABEL: subsubni8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    neg w8, w0
+; CHECK-NEXT:    mov z0.b, w8
+; CHECK-NEXT:    subr z0.b, z0.b, #0 // =0x0
+; CHECK-NEXT:    ret
+entry:
+  %sub = sub i8 0, %b
+  %0 = insertelement <vscale x 16 x i8> undef, i8 %sub, i32 0
+  %sh_prom = shufflevector <vscale x 16 x i8> %0, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %sub2 = sub <vscale x 16 x i8> zeroinitializer, %sh_prom
+  ret <vscale x 16 x i8> %sub2
+}
