[llvm] 7d6e4ed - [AArch64] Adjust dot product tests. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 1 04:46:58 PST 2021


Author: David Green
Date: 2021-03-01T12:46:43Z
New Revision: 7d6e4ed1558fefae98cde41d4139131201bd2416

URL: https://github.com/llvm/llvm-project/commit/7d6e4ed1558fefae98cde41d4139131201bd2416
DIFF: https://github.com/llvm/llvm-project/commit/7d6e4ed1558fefae98cde41d4139131201bd2416.diff

LOG: [AArch64] Adjust dot product tests. NFC

This regenerates and splits out the dot product tests, adding a few extra
tests for upcoming changes.
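
For reference, the CHECK assertions below were produced with
utils/update_llc_test_checks.py (as the NOTE lines in the tests state). A
minimal sketch of how such a regeneration is typically invoked is shown
here; the exact flags used for this commit are not recorded, and the
command assumes a locally built llc:

    # Sketch only: regenerate FileCheck assertions for the adjusted tests.
    # Assumes the llvm-project monorepo layout and a built 'llc'; pass
    # --llc-binary /path/to/llc if llc is not on PATH.
    llvm/utils/update_llc_test_checks.py \
        llvm/test/CodeGen/AArch64/neon-dot-product.ll \
        llvm/test/CodeGen/AArch64/neon-dotpattern.ll \
        llvm/test/CodeGen/AArch64/neon-dotreduce.ll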

Added: 
    llvm/test/CodeGen/AArch64/neon-dotpattern.ll
    llvm/test/CodeGen/AArch64/neon-dotreduce.ll

Modified: 
    llvm/test/CodeGen/AArch64/neon-dot-product.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/neon-dot-product.ll b/llvm/test/CodeGen/AArch64/neon-dot-product.ll
index 2f5ec6d48be6..1d82f93b32d1 100644
--- a/llvm/test/CodeGen/AArch64/neon-dot-product.ll
+++ b/llvm/test/CodeGen/AArch64/neon-dot-product.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple aarch64-none-linux-gnu -mattr=+dotprod    < %s | FileCheck %s
 ; RUN: llc -mtriple aarch64-none-linux-gnu -mcpu=cortex-a65   < %s | FileCheck %s
 ; RUN: llc -mtriple aarch64-none-linux-gnu -mcpu=cortex-a65ae < %s | FileCheck %s
@@ -11,41 +12,106 @@ declare <2 x i32> @llvm.aarch64.neon.sdot.v2i32.v8i8(<2 x i32>, <8 x i8>, <8 x i
 declare <4 x i32> @llvm.aarch64.neon.sdot.v4i32.v16i8(<4 x i32>, <16 x i8>, <16 x i8>)
 
 define <2 x i32> @test_vdot_u32(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) #0 {
-entry:
 ; CHECK-LABEL: test_vdot_u32:
-; CHECK: udot v0.2s, v1.8b, v2.8b
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    udot v0.2s, v1.8b, v2.8b
+; CHECK-NEXT:    ret
+entry:
   %vdot1.i = call <2 x i32> @llvm.aarch64.neon.udot.v2i32.v8i8(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) #2
   ret <2 x i32> %vdot1.i
 }
 
 define <4 x i32> @test_vdotq_u32(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) #0 {
-entry:
 ; CHECK-LABEL: test_vdotq_u32:
-; CHECK: udot v0.4s, v1.16b, v2.16b
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    udot v0.4s, v1.16b, v2.16b
+; CHECK-NEXT:    ret
+entry:
   %vdot1.i = call <4 x i32> @llvm.aarch64.neon.udot.v4i32.v16i8(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) #2
   ret <4 x i32> %vdot1.i
 }
 
 define <2 x i32> @test_vdot_s32(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) #0 {
-entry:
 ; CHECK-LABEL: test_vdot_s32:
-; CHECK: sdot v0.2s, v1.8b, v2.8b
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sdot v0.2s, v1.8b, v2.8b
+; CHECK-NEXT:    ret
+entry:
   %vdot1.i = call <2 x i32> @llvm.aarch64.neon.sdot.v2i32.v8i8(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) #2
   ret <2 x i32> %vdot1.i
 }
 
 define <4 x i32> @test_vdotq_s32(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) #0 {
-entry:
 ; CHECK-LABEL: test_vdotq_s32:
-; CHECK: sdot v0.4s, v1.16b, v2.16b
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sdot v0.4s, v1.16b, v2.16b
+; CHECK-NEXT:    ret
+entry:
   %vdot1.i = call <4 x i32> @llvm.aarch64.neon.sdot.v4i32.v16i8(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) #2
   ret <4 x i32> %vdot1.i
 }
 
-define <2 x i32> @test_vdot_lane_u32(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) {
+
+define <2 x i32> @test_vdot_u32_zero(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) #0 {
+; CHECK-LABEL: test_vdot_u32_zero:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v3.2d, #0000000000000000
+; CHECK-NEXT:    udot v3.2s, v1.8b, v2.8b
+; CHECK-NEXT:    add v0.2s, v3.2s, v0.2s
+; CHECK-NEXT:    ret
 entry:
+  %vdot1.i = call <2 x i32> @llvm.aarch64.neon.udot.v2i32.v8i8(<2 x i32> zeroinitializer, <8 x i8> %b, <8 x i8> %c) #2
+  %ret = add <2 x i32> %vdot1.i, %a
+  ret <2 x i32> %ret
+}
+
+define <4 x i32> @test_vdotq_u32_zero(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) #0 {
+; CHECK-LABEL: test_vdotq_u32_zero:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v3.2d, #0000000000000000
+; CHECK-NEXT:    udot v3.4s, v1.16b, v2.16b
+; CHECK-NEXT:    add v0.4s, v3.4s, v0.4s
+; CHECK-NEXT:    ret
+entry:
+  %vdot1.i = call <4 x i32> @llvm.aarch64.neon.udot.v4i32.v16i8(<4 x i32> zeroinitializer, <16 x i8> %b, <16 x i8> %c) #2
+  %ret = add <4 x i32> %vdot1.i, %a
+  ret <4 x i32> %ret
+}
+
+define <2 x i32> @test_vdot_s32_zero(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) #0 {
+; CHECK-LABEL: test_vdot_s32_zero:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v3.2d, #0000000000000000
+; CHECK-NEXT:    sdot v3.2s, v1.8b, v2.8b
+; CHECK-NEXT:    add v0.2s, v3.2s, v0.2s
+; CHECK-NEXT:    ret
+entry:
+  %vdot1.i = call <2 x i32> @llvm.aarch64.neon.sdot.v2i32.v8i8(<2 x i32> zeroinitializer, <8 x i8> %b, <8 x i8> %c) #2
+  %ret = add <2 x i32> %vdot1.i, %a
+  ret <2 x i32> %ret
+}
+
+define <4 x i32> @test_vdotq_s32_zero(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) #0 {
+; CHECK-LABEL: test_vdotq_s32_zero:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v3.2d, #0000000000000000
+; CHECK-NEXT:    sdot v3.4s, v1.16b, v2.16b
+; CHECK-NEXT:    add v0.4s, v3.4s, v0.4s
+; CHECK-NEXT:    ret
+entry:
+  %vdot1.i = call <4 x i32> @llvm.aarch64.neon.sdot.v4i32.v16i8(<4 x i32> zeroinitializer, <16 x i8> %b, <16 x i8> %c) #2
+  %ret = add <4 x i32> %vdot1.i, %a
+  ret <4 x i32> %ret
+}
+
+
+define <2 x i32> @test_vdot_lane_u32(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) {
 ; CHECK-LABEL: test_vdot_lane_u32:
-; CHECK: udot v0.2s, v1.8b, v2.4b[1]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT:    udot v0.2s, v1.8b, v2.4b[1]
+; CHECK-NEXT:    ret
+entry:
   %.cast = bitcast <8 x i8> %c to <2 x i32>
   %shuffle = shufflevector <2 x i32> %.cast, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
   %.cast5 = bitcast <2 x i32> %shuffle to <8 x i8>
@@ -54,9 +120,12 @@ entry:
 }
 
 define <4 x i32> @test_vdotq_lane_u32(<4 x i32> %a, <16 x i8> %b, <8 x i8> %c) {
-entry:
 ; CHECK-LABEL: test_vdotq_lane_u32:
-; CHECK:  udot v0.4s, v1.16b, v2.4b[1]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT:    udot v0.4s, v1.16b, v2.4b[1]
+; CHECK-NEXT:    ret
+entry:
   %.cast = bitcast <8 x i8> %c to <2 x i32>
   %shuffle = shufflevector <2 x i32> %.cast, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %.cast3 = bitcast <4 x i32> %shuffle to <16 x i8>
@@ -65,9 +134,11 @@ entry:
 }
 
 define <2 x i32> @test_vdot_laneq_u32(<2 x i32> %a, <8 x i8> %b, <16 x i8> %c) {
-entry:
 ; CHECK-LABEL: test_vdot_laneq_u32:
-; CHECK:  udot v0.2s, v1.8b, v2.4b[1]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    udot v0.2s, v1.8b, v2.4b[1]
+; CHECK-NEXT:    ret
+entry:
   %.cast = bitcast <16 x i8> %c to <4 x i32>
   %shuffle = shufflevector <4 x i32> %.cast, <4 x i32> undef, <2 x i32> <i32 1, i32 1>
   %.cast5 = bitcast <2 x i32> %shuffle to <8 x i8>
@@ -76,9 +147,11 @@ entry:
 }
 
 define <4 x i32> @test_vdotq_laneq_u32(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) {
-entry:
 ; CHECK-LABEL: test_vdotq_laneq_u32:
-; CHECK:  udot v0.4s, v1.16b, v2.4b[1]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    udot v0.4s, v1.16b, v2.4b[1]
+; CHECK-NEXT:    ret
+entry:
   %.cast = bitcast <16 x i8> %c to <4 x i32>
   %shuffle = shufflevector <4 x i32> %.cast, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %.cast3 = bitcast <4 x i32> %shuffle to <16 x i8>
@@ -86,216 +159,163 @@ entry:
   ret <4 x i32> %vdot1.i
 }
 
-define <2 x i32> @test_vdot_lane_s32(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) {
+
+define <2 x i32> @test_vdot_lane_u32_zero(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) {
 entry:
-; CHECK-LABEL: test_vdot_lane_s32:
-; CHECK: sdot v0.2s, v1.8b, v2.4b[1]
   %.cast = bitcast <8 x i8> %c to <2 x i32>
   %shuffle = shufflevector <2 x i32> %.cast, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
   %.cast5 = bitcast <2 x i32> %shuffle to <8 x i8>
-  %vdot1.i = call <2 x i32> @llvm.aarch64.neon.sdot.v2i32.v8i8(<2 x i32> %a, <8 x i8> %b, <8 x i8> %.cast5) #2
-  ret <2 x i32> %vdot1.i
+  %vdot1.i = call <2 x i32> @llvm.aarch64.neon.udot.v2i32.v8i8(<2 x i32> zeroinitializer, <8 x i8> %b, <8 x i8> %.cast5) #2
+  %ret = add <2 x i32> %vdot1.i, %a
+  ret <2 x i32> %ret
 }
 
-define <4 x i32> @test_vdotq_lane_s32(<4 x i32> %a, <16 x i8> %b, <8 x i8> %c) {
+define <4 x i32> @test_vdotq_lane_u32_zero(<4 x i32> %a, <16 x i8> %b, <8 x i8> %c) {
 entry:
-; CHECK-LABEL: test_vdotq_lane_s32:
-; CHECK:  sdot v0.4s, v1.16b, v2.4b[1]
   %.cast = bitcast <8 x i8> %c to <2 x i32>
   %shuffle = shufflevector <2 x i32> %.cast, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %.cast3 = bitcast <4 x i32> %shuffle to <16 x i8>
-  %vdot1.i = call <4 x i32> @llvm.aarch64.neon.sdot.v4i32.v16i8(<4 x i32> %a, <16 x i8> %b, <16 x i8> %.cast3) #2
-  ret <4 x i32> %vdot1.i
+  %vdot1.i = call <4 x i32> @llvm.aarch64.neon.udot.v4i32.v16i8(<4 x i32> zeroinitializer, <16 x i8> %b, <16 x i8> %.cast3) #2
+  %ret = add <4 x i32> %vdot1.i, %a
+  ret <4 x i32> %ret
 }
 
-define <2 x i32> @test_vdot_laneq_s32(<2 x i32> %a, <8 x i8> %b, <16 x i8> %c) {
+define <2 x i32> @test_vdot_laneq_u32_zero(<2 x i32> %a, <8 x i8> %b, <16 x i8> %c) {
+; CHECK-LABEL: test_vdot_laneq_u32_zero:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v3.2d, #0000000000000000
+; CHECK-NEXT:    udot v3.2s, v1.8b, v2.4b[1]
+; CHECK-NEXT:    add v0.2s, v3.2s, v0.2s
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_vdot_laneq_s32:
-; CHECK:  sdot v0.2s, v1.8b, v2.4b[1]
   %.cast = bitcast <16 x i8> %c to <4 x i32>
   %shuffle = shufflevector <4 x i32> %.cast, <4 x i32> undef, <2 x i32> <i32 1, i32 1>
   %.cast5 = bitcast <2 x i32> %shuffle to <8 x i8>
-  %vdot1.i = call <2 x i32> @llvm.aarch64.neon.sdot.v2i32.v8i8(<2 x i32> %a, <8 x i8> %b, <8 x i8> %.cast5) #2
-  ret <2 x i32> %vdot1.i
+  %vdot1.i = call <2 x i32> @llvm.aarch64.neon.udot.v2i32.v8i8(<2 x i32> zeroinitializer, <8 x i8> %b, <8 x i8> %.cast5) #2
+  %ret = add <2 x i32> %vdot1.i, %a
+  ret <2 x i32> %ret
 }
 
-define <4 x i32> @test_vdotq_laneq_s32(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) {
+define <4 x i32> @test_vdotq_laneq_u32_zero(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) {
+; CHECK-LABEL: test_vdotq_laneq_u32_zero:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v3.2d, #0000000000000000
+; CHECK-NEXT:    udot v3.4s, v1.16b, v2.4b[1]
+; CHECK-NEXT:    add v0.4s, v3.4s, v0.4s
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_vdotq_laneq_s32:
-; CHECK:  sdot v0.4s, v1.16b, v2.4b[1]
   %.cast = bitcast <16 x i8> %c to <4 x i32>
   %shuffle = shufflevector <4 x i32> %.cast, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %.cast3 = bitcast <4 x i32> %shuffle to <16 x i8>
-  %vdot1.i = call <4 x i32> @llvm.aarch64.neon.sdot.v4i32.v16i8(<4 x i32> %a, <16 x i8> %b, <16 x i8> %.cast3) #2
-  ret <4 x i32> %vdot1.i
+  %vdot1.i = call <4 x i32> @llvm.aarch64.neon.udot.v4i32.v16i8(<4 x i32> zeroinitializer, <16 x i8> %b, <16 x i8> %.cast3) #2
+  %ret = add <4 x i32> %vdot1.i, %a
+  ret <4 x i32> %ret
 }
 
-define fastcc void @test_sdot_v4i8(i8* noalias nocapture %0, i8* noalias nocapture readonly %1, i8* noalias nocapture readonly %2) {
+
+define <2 x i32> @test_vdot_lane_s32(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) {
+; CHECK-LABEL: test_vdot_lane_s32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT:    sdot v0.2s, v1.8b, v2.4b[1]
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_sdot_v4i8:
-; CHECK:  sdot {{v[0-9]+}}.2s, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-  %3 = bitcast i8* %0 to i32*
-  %4 = load i8, i8* %1, align 1
-  %5 = sext i8 %4 to i32
-  %6 = load i8, i8* %2, align 1
-  %7 = sext i8 %6 to i32
-  %8 = mul nsw i32 %7, %5
-  %9 = getelementptr inbounds i8, i8* %1, i64 1
-  %10 = load i8, i8* %9, align 1
-  %11 = sext i8 %10 to i32
-  %12 = getelementptr inbounds i8, i8* %2, i64 1
-  %13 = load i8, i8* %12, align 1
-  %14 = sext i8 %13 to i32
-  %15 = mul nsw i32 %14, %11
-  %16 = add nsw i32 %15, %8
-  %17 = getelementptr inbounds i8, i8* %1, i64 2
-  %18 = load i8, i8* %17, align 1
-  %19 = sext i8 %18 to i32
-  %20 = getelementptr inbounds i8, i8* %2, i64 2
-  %21 = load i8, i8* %20, align 1
-  %22 = sext i8 %21 to i32
-  %23 = mul nsw i32 %22, %19
-  %24 = add nsw i32 %23, %16
-  %25 = getelementptr inbounds i8, i8* %1, i64 3
-  %26 = load i8, i8* %25, align 1
-  %27 = sext i8 %26 to i32
-  %28 = getelementptr inbounds i8, i8* %2, i64 3
-  %29 = load i8, i8* %28, align 1
-  %30 = sext i8 %29 to i32
-  %31 = mul nsw i32 %30, %27
-  %32 = add nsw i32 %31, %24
-  store i32 %32, i32* %3, align 64
-  ret void
+  %.cast = bitcast <8 x i8> %c to <2 x i32>
+  %shuffle = shufflevector <2 x i32> %.cast, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+  %.cast5 = bitcast <2 x i32> %shuffle to <8 x i8>
+  %vdot1.i = call <2 x i32> @llvm.aarch64.neon.sdot.v2i32.v8i8(<2 x i32> %a, <8 x i8> %b, <8 x i8> %.cast5) #2
+  ret <2 x i32> %vdot1.i
 }
 
-define fastcc void @test_udot_v4i8(i8* noalias nocapture %0, i8* noalias nocapture readonly %1, i8* noalias nocapture readonly %2) {
+define <4 x i32> @test_vdotq_lane_s32(<4 x i32> %a, <16 x i8> %b, <8 x i8> %c) {
+; CHECK-LABEL: test_vdotq_lane_s32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT:    sdot v0.4s, v1.16b, v2.4b[1]
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_udot_v4i8:
-; CHECK:  udot {{v[0-9]+}}.2s, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-  %3 = bitcast i8* %0 to i32*
-  %4 = load i8, i8* %1, align 1
-  %5 = zext i8 %4 to i32
-  %6 = load i8, i8* %2, align 1
-  %7 = zext i8 %6 to i32
-  %8 = mul nsw i32 %7, %5
-  %9 = getelementptr inbounds i8, i8* %1, i64 1
-  %10 = load i8, i8* %9, align 1
-  %11 = zext i8 %10 to i32
-  %12 = getelementptr inbounds i8, i8* %2, i64 1
-  %13 = load i8, i8* %12, align 1
-  %14 = zext i8 %13 to i32
-  %15 = mul nsw i32 %14, %11
-  %16 = add nsw i32 %15, %8
-  %17 = getelementptr inbounds i8, i8* %1, i64 2
-  %18 = load i8, i8* %17, align 1
-  %19 = zext i8 %18 to i32
-  %20 = getelementptr inbounds i8, i8* %2, i64 2
-  %21 = load i8, i8* %20, align 1
-  %22 = zext i8 %21 to i32
-  %23 = mul nsw i32 %22, %19
-  %24 = add nsw i32 %23, %16
-  %25 = getelementptr inbounds i8, i8* %1, i64 3
-  %26 = load i8, i8* %25, align 1
-  %27 = zext i8 %26 to i32
-  %28 = getelementptr inbounds i8, i8* %2, i64 3
-  %29 = load i8, i8* %28, align 1
-  %30 = zext i8 %29 to i32
-  %31 = mul nsw i32 %30, %27
-  %32 = add nsw i32 %31, %24
-  store i32 %32, i32* %3, align 64
-  ret void
+  %.cast = bitcast <8 x i8> %c to <2 x i32>
+  %shuffle = shufflevector <2 x i32> %.cast, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  %.cast3 = bitcast <4 x i32> %shuffle to <16 x i8>
+  %vdot1.i = call <4 x i32> @llvm.aarch64.neon.sdot.v4i32.v16i8(<4 x i32> %a, <16 x i8> %b, <16 x i8> %.cast3) #2
+  ret <4 x i32> %vdot1.i
 }
 
-declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
-
-define i32 @test_udot_v8i8(i8* nocapture readonly %a, i8* nocapture readonly %b) {
+define <2 x i32> @test_vdot_laneq_s32(<2 x i32> %a, <8 x i8> %b, <16 x i8> %c) {
+; CHECK-LABEL: test_vdot_laneq_s32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sdot v0.2s, v1.8b, v2.4b[1]
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_udot_v8i8:
-; CHECK:  udot {{v[0-9]+}}.2s, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-  %0 = bitcast i8* %a to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0
-  %2 = zext <8 x i8> %1 to <8 x i32>
-  %3 = bitcast i8* %b to <8 x i8>*
-  %4 = load <8 x i8>, <8 x i8>* %3
-  %5 = zext <8 x i8> %4 to <8 x i32>
-  %6 = mul nuw nsw <8 x i32> %5, %2
-  %7 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %6)
-  ret i32 %7
+  %.cast = bitcast <16 x i8> %c to <4 x i32>
+  %shuffle = shufflevector <4 x i32> %.cast, <4 x i32> undef, <2 x i32> <i32 1, i32 1>
+  %.cast5 = bitcast <2 x i32> %shuffle to <8 x i8>
+  %vdot1.i = call <2 x i32> @llvm.aarch64.neon.sdot.v2i32.v8i8(<2 x i32> %a, <8 x i8> %b, <8 x i8> %.cast5) #2
+  ret <2 x i32> %vdot1.i
 }
 
-define i32 @test_sdot_v8i8(i8* nocapture readonly %a, i8* nocapture readonly %b) {
+define <4 x i32> @test_vdotq_laneq_s32(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) {
+; CHECK-LABEL: test_vdotq_laneq_s32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sdot v0.4s, v1.16b, v2.4b[1]
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_sdot_v8i8:
-; CHECK:  sdot {{v[0-9]+}}.2s, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-  %0 = bitcast i8* %a to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0
-  %2 = sext <8 x i8> %1 to <8 x i32>
-  %3 = bitcast i8* %b to <8 x i8>*
-  %4 = load <8 x i8>, <8 x i8>* %3
-  %5 = sext <8 x i8> %4 to <8 x i32>
-  %6 = mul nsw <8 x i32> %5, %2
-  %7 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %6)
-  ret i32 %7
+  %.cast = bitcast <16 x i8> %c to <4 x i32>
+  %shuffle = shufflevector <4 x i32> %.cast, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  %.cast3 = bitcast <4 x i32> %shuffle to <16 x i8>
+  %vdot1.i = call <4 x i32> @llvm.aarch64.neon.sdot.v4i32.v16i8(<4 x i32> %a, <16 x i8> %b, <16 x i8> %.cast3) #2
+  ret <4 x i32> %vdot1.i
 }
 
-declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
 
-define i32 @test_udot_v16i8(i8* nocapture readonly %a, i8* nocapture readonly %b, i32 %sum) {
+define <2 x i32> @test_vdot_lane_s32_zero(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) {
 entry:
-; CHECK-LABEL: test_udot_v16i8:
-; CHECK:  udot {{v[0-9]+}}.4s, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-  %0 = bitcast i8* %a to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0
-  %2 = zext <16 x i8> %1 to <16 x i32>
-  %3 = bitcast i8* %b to <16 x i8>*
-  %4 = load <16 x i8>, <16 x i8>* %3
-  %5 = zext <16 x i8> %4 to <16 x i32>
-  %6 = mul nuw nsw <16 x i32> %5, %2
-  %7 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %6)
-  %op.extra = add i32 %7, %sum
-  ret i32 %op.extra
+  %.cast = bitcast <8 x i8> %c to <2 x i32>
+  %shuffle = shufflevector <2 x i32> %.cast, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+  %.cast5 = bitcast <2 x i32> %shuffle to <8 x i8>
+  %vdot1.i = call <2 x i32> @llvm.aarch64.neon.sdot.v2i32.v8i8(<2 x i32> zeroinitializer, <8 x i8> %b, <8 x i8> %.cast5) #2
+  %ret = add <2 x i32> %vdot1.i, %a
+  ret <2 x i32> %ret
 }
 
-define i32 @test_udot_v16i8_2(i8* nocapture readonly %a1) {
-; CHECK-LABEL: test_udot_v16i8_2:
-; CHECK:    movi {{v[0-9]+}}.16b, #1
-; CHECK:    movi {{v[0-9]+}}.2d, #0000000000000000
-; CHECK:    udot {{v[0-9]+}}.4s, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-; CHECK:    addv s0, {{v[0-9]+}}.4s
+define <4 x i32> @test_vdotq_lane_s32_zero(<4 x i32> %a, <16 x i8> %b, <8 x i8> %c) {
 entry:
-  %0 = bitcast i8* %a1 to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0
-  %2 = zext <16 x i8> %1 to <16 x i32>
-  %3 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %2)
-  ret i32 %3
+  %.cast = bitcast <8 x i8> %c to <2 x i32>
+  %shuffle = shufflevector <2 x i32> %.cast, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  %.cast3 = bitcast <4 x i32> %shuffle to <16 x i8>
+  %vdot1.i = call <4 x i32> @llvm.aarch64.neon.sdot.v4i32.v16i8(<4 x i32> zeroinitializer, <16 x i8> %b, <16 x i8> %.cast3) #2
+  %ret = add <4 x i32> %vdot1.i, %a
+  ret <4 x i32> %ret
 }
 
-define i32 @test_sdot_v16i8(i8* nocapture readonly %a, i8* nocapture readonly %b, i32 %sum) {
+define <2 x i32> @test_vdot_laneq_s32_zero(<2 x i32> %a, <8 x i8> %b, <16 x i8> %c) {
+; CHECK-LABEL: test_vdot_laneq_s32_zero:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v3.2d, #0000000000000000
+; CHECK-NEXT:    sdot v3.2s, v1.8b, v2.4b[1]
+; CHECK-NEXT:    add v0.2s, v3.2s, v0.2s
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_sdot_v16i8:
-; CHECK:  sdot {{v[0-9]+}}.4s, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-  %0 = bitcast i8* %a to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0
-  %2 = sext <16 x i8> %1 to <16 x i32>
-  %3 = bitcast i8* %b to <16 x i8>*
-  %4 = load <16 x i8>, <16 x i8>* %3
-  %5 = sext <16 x i8> %4 to <16 x i32>
-  %6 = mul nsw <16 x i32> %5, %2
-  %7 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %6)
-  %op.extra = add nsw i32 %7, %sum
-  ret i32 %op.extra
+  %.cast = bitcast <16 x i8> %c to <4 x i32>
+  %shuffle = shufflevector <4 x i32> %.cast, <4 x i32> undef, <2 x i32> <i32 1, i32 1>
+  %.cast5 = bitcast <2 x i32> %shuffle to <8 x i8>
+  %vdot1.i = call <2 x i32> @llvm.aarch64.neon.sdot.v2i32.v8i8(<2 x i32> zeroinitializer, <8 x i8> %b, <8 x i8> %.cast5) #2
+  %ret = add <2 x i32> %vdot1.i, %a
+  ret <2 x i32> %ret
 }
 
-define i32 @test_sdot_v16i8_2(i8* nocapture readonly %a1) {
-; CHECK-LABEL: test_sdot_v16i8_2:
-; CHECK:    movi {{v[0-9]+}}.16b, #1
-; CHECK:    movi {{v[0-9]+}}.2d, #0000000000000000
-; CHECK:    sdot {{v[0-9]+}}.4s, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-; CHECK:    addv s0, {{v[0-9]+}}.4s
+define <4 x i32> @test_vdotq_laneq_s32_zero(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) {
+; CHECK-LABEL: test_vdotq_laneq_s32_zero:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v3.2d, #0000000000000000
+; CHECK-NEXT:    sdot v3.4s, v1.16b, v2.4b[1]
+; CHECK-NEXT:    add v0.4s, v3.4s, v0.4s
+; CHECK-NEXT:    ret
 entry:
-  %0 = bitcast i8* %a1 to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0
-  %2 = sext <16 x i8> %1 to <16 x i32>
-  %3 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %2)
-  ret i32 %3
+  %.cast = bitcast <16 x i8> %c to <4 x i32>
+  %shuffle = shufflevector <4 x i32> %.cast, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  %.cast3 = bitcast <4 x i32> %shuffle to <16 x i8>
+  %vdot1.i = call <4 x i32> @llvm.aarch64.neon.sdot.v4i32.v16i8(<4 x i32> zeroinitializer, <16 x i8> %b, <16 x i8> %.cast3) #2
+  %ret = add <4 x i32> %vdot1.i, %a
+  ret <4 x i32> %ret
 }

diff --git a/llvm/test/CodeGen/AArch64/neon-dotpattern.ll b/llvm/test/CodeGen/AArch64/neon-dotpattern.ll
new file mode 100644
index 000000000000..5680fc585d25
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/neon-dotpattern.ll
@@ -0,0 +1,96 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple aarch64-none-linux-gnu -mattr=+dotprod    < %s | FileCheck %s
+
+define fastcc void @test_sdot_v4i8(i8* noalias nocapture %0, i8* noalias nocapture readonly %1, i8* noalias nocapture readonly %2) {
+; CHECK-LABEL: test_sdot_v4i8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr w8, [x2]
+; CHECK-NEXT:    ldr w9, [x1]
+; CHECK-NEXT:    dup v0.2s, wzr
+; CHECK-NEXT:    fmov s1, w8
+; CHECK-NEXT:    fmov s2, w9
+; CHECK-NEXT:    sdot v0.2s, v1.8b, v2.8b
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    str w8, [x0]
+; CHECK-NEXT:    ret
+entry:
+  %3 = bitcast i8* %0 to i32*
+  %4 = load i8, i8* %1, align 1
+  %5 = sext i8 %4 to i32
+  %6 = load i8, i8* %2, align 1
+  %7 = sext i8 %6 to i32
+  %8 = mul nsw i32 %7, %5
+  %9 = getelementptr inbounds i8, i8* %1, i64 1
+  %10 = load i8, i8* %9, align 1
+  %11 = sext i8 %10 to i32
+  %12 = getelementptr inbounds i8, i8* %2, i64 1
+  %13 = load i8, i8* %12, align 1
+  %14 = sext i8 %13 to i32
+  %15 = mul nsw i32 %14, %11
+  %16 = add nsw i32 %15, %8
+  %17 = getelementptr inbounds i8, i8* %1, i64 2
+  %18 = load i8, i8* %17, align 1
+  %19 = sext i8 %18 to i32
+  %20 = getelementptr inbounds i8, i8* %2, i64 2
+  %21 = load i8, i8* %20, align 1
+  %22 = sext i8 %21 to i32
+  %23 = mul nsw i32 %22, %19
+  %24 = add nsw i32 %23, %16
+  %25 = getelementptr inbounds i8, i8* %1, i64 3
+  %26 = load i8, i8* %25, align 1
+  %27 = sext i8 %26 to i32
+  %28 = getelementptr inbounds i8, i8* %2, i64 3
+  %29 = load i8, i8* %28, align 1
+  %30 = sext i8 %29 to i32
+  %31 = mul nsw i32 %30, %27
+  %32 = add nsw i32 %31, %24
+  store i32 %32, i32* %3, align 64
+  ret void
+}
+
+define fastcc void @test_udot_v4i8(i8* noalias nocapture %0, i8* noalias nocapture readonly %1, i8* noalias nocapture readonly %2) {
+; CHECK-LABEL: test_udot_v4i8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr w8, [x2]
+; CHECK-NEXT:    ldr w9, [x1]
+; CHECK-NEXT:    dup v0.2s, wzr
+; CHECK-NEXT:    fmov s1, w8
+; CHECK-NEXT:    fmov s2, w9
+; CHECK-NEXT:    udot v0.2s, v1.8b, v2.8b
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    str w8, [x0]
+; CHECK-NEXT:    ret
+entry:
+  %3 = bitcast i8* %0 to i32*
+  %4 = load i8, i8* %1, align 1
+  %5 = zext i8 %4 to i32
+  %6 = load i8, i8* %2, align 1
+  %7 = zext i8 %6 to i32
+  %8 = mul nsw i32 %7, %5
+  %9 = getelementptr inbounds i8, i8* %1, i64 1
+  %10 = load i8, i8* %9, align 1
+  %11 = zext i8 %10 to i32
+  %12 = getelementptr inbounds i8, i8* %2, i64 1
+  %13 = load i8, i8* %12, align 1
+  %14 = zext i8 %13 to i32
+  %15 = mul nsw i32 %14, %11
+  %16 = add nsw i32 %15, %8
+  %17 = getelementptr inbounds i8, i8* %1, i64 2
+  %18 = load i8, i8* %17, align 1
+  %19 = zext i8 %18 to i32
+  %20 = getelementptr inbounds i8, i8* %2, i64 2
+  %21 = load i8, i8* %20, align 1
+  %22 = zext i8 %21 to i32
+  %23 = mul nsw i32 %22, %19
+  %24 = add nsw i32 %23, %16
+  %25 = getelementptr inbounds i8, i8* %1, i64 3
+  %26 = load i8, i8* %25, align 1
+  %27 = zext i8 %26 to i32
+  %28 = getelementptr inbounds i8, i8* %2, i64 3
+  %29 = load i8, i8* %28, align 1
+  %30 = zext i8 %29 to i32
+  %31 = mul nsw i32 %30, %27
+  %32 = add nsw i32 %31, %24
+  store i32 %32, i32* %3, align 64
+  ret void
+}

diff --git a/llvm/test/CodeGen/AArch64/neon-dotreduce.ll b/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
new file mode 100644
index 000000000000..2b399f23376b
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
@@ -0,0 +1,407 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple aarch64-none-linux-gnu -mattr=+dotprod    < %s | FileCheck %s
+
+declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
+declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
+
+define i32 @test_udot_v8i8(i8* nocapture readonly %a, i8* nocapture readonly %b) {
+; CHECK-LABEL: test_udot_v8i8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    dup v2.2s, wzr
+; CHECK-NEXT:    udot v2.2s, v1.8b, v0.8b
+; CHECK-NEXT:    addp v0.2s, v2.2s, v2.2s
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ret
+entry:
+  %0 = bitcast i8* %a to <8 x i8>*
+  %1 = load <8 x i8>, <8 x i8>* %0
+  %2 = zext <8 x i8> %1 to <8 x i32>
+  %3 = bitcast i8* %b to <8 x i8>*
+  %4 = load <8 x i8>, <8 x i8>* %3
+  %5 = zext <8 x i8> %4 to <8 x i32>
+  %6 = mul nuw nsw <8 x i32> %5, %2
+  %7 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %6)
+  ret i32 %7
+}
+
+define i32 @test_udot_v8i8_nomla(i8* nocapture readonly %a1) {
+; CHECK-LABEL: test_udot_v8i8_nomla:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ushll v0.8h, v0.8b, #0
+; CHECK-NEXT:    ushll v1.4s, v0.4h, #0
+; CHECK-NEXT:    uaddw2 v0.4s, v1.4s, v0.8h
+; CHECK-NEXT:    addv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %0 = bitcast i8* %a1 to <8 x i8>*
+  %1 = load <8 x i8>, <8 x i8>* %0
+  %2 = zext <8 x i8> %1 to <8 x i32>
+  %3 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %2)
+  ret i32 %3
+}
+
+define i32 @test_sdot_v8i8(i8* nocapture readonly %a, i8* nocapture readonly %b) {
+; CHECK-LABEL: test_sdot_v8i8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    dup v2.2s, wzr
+; CHECK-NEXT:    sdot v2.2s, v1.8b, v0.8b
+; CHECK-NEXT:    addp v0.2s, v2.2s, v2.2s
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ret
+entry:
+  %0 = bitcast i8* %a to <8 x i8>*
+  %1 = load <8 x i8>, <8 x i8>* %0
+  %2 = sext <8 x i8> %1 to <8 x i32>
+  %3 = bitcast i8* %b to <8 x i8>*
+  %4 = load <8 x i8>, <8 x i8>* %3
+  %5 = sext <8 x i8> %4 to <8 x i32>
+  %6 = mul nsw <8 x i32> %5, %2
+  %7 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %6)
+  ret i32 %7
+}
+
+define i32 @test_sdot_v8i8_nomla(i8* nocapture readonly %a1) {
+; CHECK-LABEL: test_sdot_v8i8_nomla:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    sshll v0.8h, v0.8b, #0
+; CHECK-NEXT:    sshll v1.4s, v0.4h, #0
+; CHECK-NEXT:    saddw2 v0.4s, v1.4s, v0.8h
+; CHECK-NEXT:    addv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %0 = bitcast i8* %a1 to <8 x i8>*
+  %1 = load <8 x i8>, <8 x i8>* %0
+  %2 = sext <8 x i8> %1 to <8 x i32>
+  %3 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %2)
+  ret i32 %3
+}
+
+
+define i32 @test_udot_v16i8(i8* nocapture readonly %a, i8* nocapture readonly %b, i32 %sum) {
+; CHECK-LABEL: test_udot_v16i8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    dup v2.4s, wzr
+; CHECK-NEXT:    udot v2.4s, v1.16b, v0.16b
+; CHECK-NEXT:    addv s0, v2.4s
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    add w0, w8, w2
+; CHECK-NEXT:    ret
+entry:
+  %0 = bitcast i8* %a to <16 x i8>*
+  %1 = load <16 x i8>, <16 x i8>* %0
+  %2 = zext <16 x i8> %1 to <16 x i32>
+  %3 = bitcast i8* %b to <16 x i8>*
+  %4 = load <16 x i8>, <16 x i8>* %3
+  %5 = zext <16 x i8> %4 to <16 x i32>
+  %6 = mul nuw nsw <16 x i32> %5, %2
+  %7 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %6)
+  %op.extra = add i32 %7, %sum
+  ret i32 %op.extra
+}
+
+define i32 @test_udot_v16i8_nomla(i8* nocapture readonly %a1) {
+; CHECK-LABEL: test_udot_v16i8_nomla:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    movi v1.16b, #1
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
+; CHECK-NEXT:    udot v2.4s, v1.16b, v0.16b
+; CHECK-NEXT:    addv s0, v2.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %0 = bitcast i8* %a1 to <16 x i8>*
+  %1 = load <16 x i8>, <16 x i8>* %0
+  %2 = zext <16 x i8> %1 to <16 x i32>
+  %3 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %2)
+  ret i32 %3
+}
+
+define i32 @test_sdot_v16i8(i8* nocapture readonly %a, i8* nocapture readonly %b, i32 %sum) {
+; CHECK-LABEL: test_sdot_v16i8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    dup v2.4s, wzr
+; CHECK-NEXT:    sdot v2.4s, v1.16b, v0.16b
+; CHECK-NEXT:    addv s0, v2.4s
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    add w0, w8, w2
+; CHECK-NEXT:    ret
+entry:
+  %0 = bitcast i8* %a to <16 x i8>*
+  %1 = load <16 x i8>, <16 x i8>* %0
+  %2 = sext <16 x i8> %1 to <16 x i32>
+  %3 = bitcast i8* %b to <16 x i8>*
+  %4 = load <16 x i8>, <16 x i8>* %3
+  %5 = sext <16 x i8> %4 to <16 x i32>
+  %6 = mul nsw <16 x i32> %5, %2
+  %7 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %6)
+  %op.extra = add nsw i32 %7, %sum
+  ret i32 %op.extra
+}
+
+define i32 @test_sdot_v16i8_nomla(i8* nocapture readonly %a1) {
+; CHECK-LABEL: test_sdot_v16i8_nomla:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    movi v1.16b, #1
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
+; CHECK-NEXT:    sdot v2.4s, v1.16b, v0.16b
+; CHECK-NEXT:    addv s0, v2.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %0 = bitcast i8* %a1 to <16 x i8>*
+  %1 = load <16 x i8>, <16 x i8>* %0
+  %2 = sext <16 x i8> %1 to <16 x i32>
+  %3 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %2)
+  ret i32 %3
+}
+
+
+define i32 @test_udot_v8i8_double(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
+; CHECK-LABEL: test_udot_v8i8_double:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ushll v0.8h, v0.8b, #0
+; CHECK-NEXT:    ushll v1.8h, v1.8b, #0
+; CHECK-NEXT:    ushll v2.8h, v2.8b, #0
+; CHECK-NEXT:    ushll v3.8h, v3.8b, #0
+; CHECK-NEXT:    ext v4.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v5.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    umull v0.4s, v0.4h, v1.4h
+; CHECK-NEXT:    ext v1.16b, v2.16b, v2.16b, #8
+; CHECK-NEXT:    umull v2.4s, v2.4h, v3.4h
+; CHECK-NEXT:    ext v3.16b, v3.16b, v3.16b, #8
+; CHECK-NEXT:    umlal v0.4s, v4.4h, v5.4h
+; CHECK-NEXT:    umlal v2.4s, v1.4h, v3.4h
+; CHECK-NEXT:    add v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    addv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %az = zext <8 x i8> %a to <8 x i32>
+  %bz = zext <8 x i8> %b to <8 x i32>
+  %m1 = mul nuw nsw <8 x i32> %az, %bz
+  %r1 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %m1)
+  %cz = zext <8 x i8> %c to <8 x i32>
+  %dz = zext <8 x i8> %d to <8 x i32>
+  %m2 = mul nuw nsw <8 x i32> %cz, %dz
+  %r2 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %m2)
+  %x = add i32 %r1, %r2
+  ret i32 %x
+}
+
+define i32 @test_udot_v8i8_double_nomla(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
+; CHECK-LABEL: test_udot_v8i8_double_nomla:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ushll v0.8h, v0.8b, #0
+; CHECK-NEXT:    ushll v1.8h, v2.8b, #0
+; CHECK-NEXT:    ushll v2.4s, v0.4h, #0
+; CHECK-NEXT:    ushll v3.4s, v1.4h, #0
+; CHECK-NEXT:    uaddw2 v0.4s, v2.4s, v0.8h
+; CHECK-NEXT:    uaddw2 v1.4s, v3.4s, v1.8h
+; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    addv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %az = zext <8 x i8> %a to <8 x i32>
+  %r1 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %az)
+  %cz = zext <8 x i8> %c to <8 x i32>
+  %r2 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %cz)
+  %x = add i32 %r1, %r2
+  ret i32 %x
+}
+
+define i32 @test_udot_v16i8_double(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
+; CHECK-LABEL: test_udot_v16i8_double:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ushll2 v4.8h, v0.16b, #0
+; CHECK-NEXT:    ushll v0.8h, v0.8b, #0
+; CHECK-NEXT:    ushll2 v5.8h, v1.16b, #0
+; CHECK-NEXT:    ushll v1.8h, v1.8b, #0
+; CHECK-NEXT:    ext v6.16b, v4.16b, v4.16b, #8
+; CHECK-NEXT:    ext v7.16b, v5.16b, v5.16b, #8
+; CHECK-NEXT:    umull2 v16.4s, v0.8h, v1.8h
+; CHECK-NEXT:    umlal v16.4s, v6.4h, v7.4h
+; CHECK-NEXT:    ushll2 v6.8h, v2.16b, #0
+; CHECK-NEXT:    ushll v2.8h, v2.8b, #0
+; CHECK-NEXT:    ushll2 v7.8h, v3.16b, #0
+; CHECK-NEXT:    ushll v3.8h, v3.8b, #0
+; CHECK-NEXT:    umull v0.4s, v0.4h, v1.4h
+; CHECK-NEXT:    ext v1.16b, v6.16b, v6.16b, #8
+; CHECK-NEXT:    umlal v0.4s, v4.4h, v5.4h
+; CHECK-NEXT:    ext v4.16b, v7.16b, v7.16b, #8
+; CHECK-NEXT:    umull v5.4s, v2.4h, v3.4h
+; CHECK-NEXT:    umull2 v2.4s, v2.8h, v3.8h
+; CHECK-NEXT:    umlal v2.4s, v1.4h, v4.4h
+; CHECK-NEXT:    umlal v5.4s, v6.4h, v7.4h
+; CHECK-NEXT:    add v0.4s, v0.4s, v16.4s
+; CHECK-NEXT:    add v1.4s, v5.4s, v2.4s
+; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    addv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %az = zext <16 x i8> %a to <16 x i32>
+  %bz = zext <16 x i8> %b to <16 x i32>
+  %m1 = mul nuw nsw <16 x i32> %az, %bz
+  %r1 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %m1)
+  %cz = zext <16 x i8> %c to <16 x i32>
+  %dz = zext <16 x i8> %d to <16 x i32>
+  %m2 = mul nuw nsw <16 x i32> %cz, %dz
+  %r2 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %m2)
+  %x = add i32 %r1, %r2
+  ret i32 %x
+}
+
+define i32 @test_udot_v16i8_double_nomla(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
+; CHECK-LABEL: test_udot_v16i8_double_nomla:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v1.16b, #1
+; CHECK-NEXT:    movi v3.2d, #0000000000000000
+; CHECK-NEXT:    movi v4.2d, #0000000000000000
+; CHECK-NEXT:    udot v4.4s, v1.16b, v0.16b
+; CHECK-NEXT:    udot v3.4s, v1.16b, v2.16b
+; CHECK-NEXT:    add v0.4s, v4.4s, v3.4s
+; CHECK-NEXT:    addv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %az = zext <16 x i8> %a to <16 x i32>
+  %r1 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %az)
+  %cz = zext <16 x i8> %c to <16 x i32>
+  %r2 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %cz)
+  %x = add i32 %r1, %r2
+  ret i32 %x
+}
+
+define i32 @test_sdot_v8i8_double(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
+; CHECK-LABEL: test_sdot_v8i8_double:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sshll v0.8h, v0.8b, #0
+; CHECK-NEXT:    sshll v1.8h, v1.8b, #0
+; CHECK-NEXT:    sshll v2.8h, v2.8b, #0
+; CHECK-NEXT:    sshll v3.8h, v3.8b, #0
+; CHECK-NEXT:    ext v4.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ext v5.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    smull v0.4s, v0.4h, v1.4h
+; CHECK-NEXT:    ext v1.16b, v2.16b, v2.16b, #8
+; CHECK-NEXT:    smull v2.4s, v2.4h, v3.4h
+; CHECK-NEXT:    ext v3.16b, v3.16b, v3.16b, #8
+; CHECK-NEXT:    smlal v0.4s, v4.4h, v5.4h
+; CHECK-NEXT:    smlal v2.4s, v1.4h, v3.4h
+; CHECK-NEXT:    add v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    addv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %az = sext <8 x i8> %a to <8 x i32>
+  %bz = sext <8 x i8> %b to <8 x i32>
+  %m1 = mul nuw nsw <8 x i32> %az, %bz
+  %r1 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %m1)
+  %cz = sext <8 x i8> %c to <8 x i32>
+  %dz = sext <8 x i8> %d to <8 x i32>
+  %m2 = mul nuw nsw <8 x i32> %cz, %dz
+  %r2 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %m2)
+  %x = add i32 %r1, %r2
+  ret i32 %x
+}
+
+define i32 @test_sdot_v8i8_double_nomla(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
+; CHECK-LABEL: test_sdot_v8i8_double_nomla:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sshll v0.8h, v0.8b, #0
+; CHECK-NEXT:    sshll v1.8h, v2.8b, #0
+; CHECK-NEXT:    sshll v2.4s, v0.4h, #0
+; CHECK-NEXT:    sshll v3.4s, v1.4h, #0
+; CHECK-NEXT:    saddw2 v0.4s, v2.4s, v0.8h
+; CHECK-NEXT:    saddw2 v1.4s, v3.4s, v1.8h
+; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    addv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %az = sext <8 x i8> %a to <8 x i32>
+  %r1 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %az)
+  %cz = sext <8 x i8> %c to <8 x i32>
+  %r2 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %cz)
+  %x = add i32 %r1, %r2
+  ret i32 %x
+}
+
+define i32 @test_sdot_v16i8_double(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
+; CHECK-LABEL: test_sdot_v16i8_double:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sshll2 v4.8h, v0.16b, #0
+; CHECK-NEXT:    sshll v0.8h, v0.8b, #0
+; CHECK-NEXT:    sshll2 v5.8h, v1.16b, #0
+; CHECK-NEXT:    sshll v1.8h, v1.8b, #0
+; CHECK-NEXT:    ext v6.16b, v4.16b, v4.16b, #8
+; CHECK-NEXT:    ext v7.16b, v5.16b, v5.16b, #8
+; CHECK-NEXT:    smull2 v16.4s, v0.8h, v1.8h
+; CHECK-NEXT:    smlal v16.4s, v6.4h, v7.4h
+; CHECK-NEXT:    sshll2 v6.8h, v2.16b, #0
+; CHECK-NEXT:    sshll v2.8h, v2.8b, #0
+; CHECK-NEXT:    sshll2 v7.8h, v3.16b, #0
+; CHECK-NEXT:    sshll v3.8h, v3.8b, #0
+; CHECK-NEXT:    smull v0.4s, v0.4h, v1.4h
+; CHECK-NEXT:    ext v1.16b, v6.16b, v6.16b, #8
+; CHECK-NEXT:    smlal v0.4s, v4.4h, v5.4h
+; CHECK-NEXT:    ext v4.16b, v7.16b, v7.16b, #8
+; CHECK-NEXT:    smull v5.4s, v2.4h, v3.4h
+; CHECK-NEXT:    smull2 v2.4s, v2.8h, v3.8h
+; CHECK-NEXT:    smlal v2.4s, v1.4h, v4.4h
+; CHECK-NEXT:    smlal v5.4s, v6.4h, v7.4h
+; CHECK-NEXT:    add v0.4s, v0.4s, v16.4s
+; CHECK-NEXT:    add v1.4s, v5.4s, v2.4s
+; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    addv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %az = sext <16 x i8> %a to <16 x i32>
+  %bz = sext <16 x i8> %b to <16 x i32>
+  %m1 = mul nuw nsw <16 x i32> %az, %bz
+  %r1 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %m1)
+  %cz = sext <16 x i8> %c to <16 x i32>
+  %dz = sext <16 x i8> %d to <16 x i32>
+  %m2 = mul nuw nsw <16 x i32> %cz, %dz
+  %r2 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %m2)
+  %x = add i32 %r1, %r2
+  ret i32 %x
+}
+
+define i32 @test_sdot_v16i8_double_nomla(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
+; CHECK-LABEL: test_sdot_v16i8_double_nomla:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v1.16b, #1
+; CHECK-NEXT:    movi v3.2d, #0000000000000000
+; CHECK-NEXT:    movi v4.2d, #0000000000000000
+; CHECK-NEXT:    sdot v4.4s, v1.16b, v0.16b
+; CHECK-NEXT:    sdot v3.4s, v1.16b, v2.16b
+; CHECK-NEXT:    add v0.4s, v4.4s, v3.4s
+; CHECK-NEXT:    addv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %az = sext <16 x i8> %a to <16 x i32>
+  %r1 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %az)
+  %cz = sext <16 x i8> %c to <16 x i32>
+  %r2 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %cz)
+  %x = add i32 %r1, %r2
+  ret i32 %x
+}
