[llvm] cd22e7c - [AArch64] Regenerate neon-vcmla.ll tests and add tests for combining fadd with vcmla. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 20 09:29:34 PDT 2023


Author: David Green
Date: 2023-03-20T16:29:28Z
New Revision: cd22e7c3ad9885d3a9cf990a78a8a4127420da06

URL: https://github.com/llvm/llvm-project/commit/cd22e7c3ad9885d3a9cf990a78a8a4127420da06
DIFF: https://github.com/llvm/llvm-project/commit/cd22e7c3ad9885d3a9cf990a78a8a4127420da06.diff

LOG: [AArch64] Regenerate neon-vcmla.ll tests and add tests for combining fadd with vcmla. NFC

See D146407.
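
For context, the new reassoc_* functions below exercise a vcmla intrinsic with a
zeroinitializer accumulator whose result feeds a reassociable fadd. A minimal
sketch of that pattern, and of the fold D146407 appears to aim for (the fold
itself is not part of this NFC commit, so this is an assumption based on the
test names):

  ; Pattern added by the reassoc_* tests: vcmla accumulating into zero,
  ; then a fast fadd of the result with %a.
  %d   = tail call <4 x float> @llvm.aarch64.neon.vcmla.rot0.v4f32(<4 x float> zeroinitializer, <4 x float> %b, <4 x float> %c)
  %res = fadd fast <4 x float> %d, %a

  ; Expected shape after the combine (assumed): accumulate directly into %a.
  %res = tail call <4 x float> @llvm.aarch64.neon.vcmla.rot0.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c)

reassoc_nonfast_f32x4 repeats the same pattern without fast-math flags,
presumably to check that the fold is not applied there.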

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/neon-vcmla.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/AArch64/neon-vcmla.ll b/llvm/test/CodeGen/AArch64/neon-vcmla.ll
index 700e17e4b647d..76c4743de0c0c 100644
--- a/llvm/test/CodeGen/AArch64/neon-vcmla.ll
+++ b/llvm/test/CodeGen/AArch64/neon-vcmla.ll
@@ -1,19 +1,23 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
 ; RUN: llc %s -mtriple=aarch64 -mattr=+v8.3a,+fullfp16 -o - | FileCheck %s
 
 define <4 x half> @test_16x4(<4 x half> %a, <4 x half> %b, <4 x half> %c) {
+; CHECK-LABEL: test_16x4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmla v0.4h, v1.4h, v2.4h, #0
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_16x4
-; CHECK: fcmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, #0
-;
   %res = tail call <4 x half> @llvm.aarch64.neon.vcmla.rot0.v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c)
   ret <4 x half> %res
 }
 
 define <4 x half> @test_16x4_lane_1(<4 x half> %a, <4 x half> %b, <4 x half> %c) {
+; CHECK-LABEL: test_16x4_lane_1:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT:    fcmla v0.4h, v1.4h, v2.h[1], #0
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_16x4_lane_1
-; CHECK: fcmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.h[1], #0
-;
   %c.cast = bitcast <4 x half> %c to <2 x i32>
   %c.dup = shufflevector <2 x i32> %c.cast , <2 x i32> undef, <2 x i32> <i32 1, i32 1>
   %c.res = bitcast <2 x i32> %c.dup to <4 x half>
@@ -22,19 +26,22 @@ entry:
 }
 
 define <4 x half> @test_rot90_16x4(<4 x half> %a, <4 x half> %b, <4 x half> %c) {
+; CHECK-LABEL: test_rot90_16x4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmla v0.4h, v1.4h, v2.4h, #90
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_rot90_16x4
-; CHECK: fcmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, #90
-;
   %res = tail call <4 x half> @llvm.aarch64.neon.vcmla.rot90.v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c)
   ret <4 x half> %res
 }
 
 define <4 x half> @test_rot90_16x4_lane_0(<4 x half> %a, <4 x half> %b, <4 x half> %c) {
+; CHECK-LABEL: test_rot90_16x4_lane_0:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT:    fcmla v0.4h, v1.4h, v2.h[0], #90
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_rot90_16x4_lane_0
-; CHECK: fcmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.h[0], #90
-;
   %c.cast = bitcast <4 x half> %c to <2 x i32>
   %c.dup = shufflevector <2 x i32> %c.cast , <2 x i32> undef, <2 x i32> <i32 0, i32 0>
   %c.res = bitcast <2 x i32> %c.dup to <4 x half>
@@ -43,18 +50,21 @@ entry:
 }
 
 define <4 x half> @test_rot180_16x4(<4 x half> %a, <4 x half> %b, <4 x half> %c) {
+; CHECK-LABEL: test_rot180_16x4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmla v0.4h, v1.4h, v2.4h, #180
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_rot180_16x4
-; CHECK: fcmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, #180
-;
   %res = tail call <4 x half> @llvm.aarch64.neon.vcmla.rot180.v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c)
   ret <4 x half> %res
 }
 
 define <4 x half> @test_rot180_16x4_lane_0(<4 x half> %a, <4 x half> %b, <8 x half> %c) {
+; CHECK-LABEL: test_rot180_16x4_lane_0:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmla v0.4h, v1.4h, v2.h[0], #180
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_rot180_16x4_lane_0
-; CHECK: fcmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.h[0], #180
 
   %c.cast = bitcast <8 x half> %c to <4 x i32>
   %c.dup = shufflevector <4 x i32> %c.cast , <4 x i32> undef, <2 x i32> <i32 0, i32 0>
@@ -64,64 +74,71 @@ entry:
 }
 
 define <4 x half> @test_rot270_16x4(<4 x half> %a, <4 x half> %b, <4 x half> %c) {
+; CHECK-LABEL: test_rot270_16x4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmla v0.4h, v1.4h, v2.4h, #270
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_rot270_16x4
-; CHECK: fcmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, #270
-;
   %res = tail call <4 x half> @llvm.aarch64.neon.vcmla.rot270.v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c)
   ret <4 x half> %res
 }
 
 define <2 x float> @test_32x2(<2 x float> %a, <2 x float> %b, <2 x float> %c) {
+; CHECK-LABEL: test_32x2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmla v0.2s, v1.2s, v2.2s, #0
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_32x2
-; CHECK: fcmla v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, #0
-;
   %res = tail call <2 x float> @llvm.aarch64.neon.vcmla.rot0.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c)
   ret <2 x float> %res
 }
 
 define <2 x float> @test_rot90_32x2(<2 x float> %a, <2 x float> %b, <2 x float> %c) {
+; CHECK-LABEL: test_rot90_32x2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmla v0.2s, v1.2s, v2.2s, #90
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_rot90_32x2
-; CHECK: fcmla v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, #90
-;
   %res = tail call <2 x float> @llvm.aarch64.neon.vcmla.rot90.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c)
   ret <2 x float> %res
 }
 
 define <2 x float> @test_rot180_32x2(<2 x float> %a, <2 x float> %b, <2 x float> %c) {
+; CHECK-LABEL: test_rot180_32x2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmla v0.2s, v1.2s, v2.2s, #180
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_rot180_32x2
-; CHECK: fcmla v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, #180
-;
   %res = tail call <2 x float> @llvm.aarch64.neon.vcmla.rot180.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c)
   ret <2 x float> %res
 }
 
 define <2 x float> @test_rot270_32x2(<2 x float> %a, <2 x float> %b, <2 x float> %c) {
+; CHECK-LABEL: test_rot270_32x2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmla v0.2s, v1.2s, v2.2s, #270
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_rot270_32x2
-; CHECK: fcmla v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, #270
-;
   %res = tail call <2 x float> @llvm.aarch64.neon.vcmla.rot270.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c)
   ret <2 x float> %res
 }
 
 define <8 x half> @test_16x8(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
+; CHECK-LABEL: test_16x8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmla v0.8h, v1.8h, v2.8h, #0
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_16x8
-; CHECK: fcmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, #0
-;
   %res = tail call <8 x half> @llvm.aarch64.neon.vcmla.rot0.v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c)
   ret <8 x half> %res
 }
 
 define <8 x half> @test_16x8_lane_0(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
+; CHECK-LABEL: test_16x8_lane_0:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmla v0.8h, v1.8h, v2.h[0], #0
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_16x8_lane_0
-; CHECK: fcmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.h[0], #0
-;
   %c.cast = bitcast <8 x half> %c to <4 x i32>
   %c.dup = shufflevector <4 x i32> %c.cast , <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
   %c.res = bitcast <4 x i32> %c.dup to <8 x half>
@@ -130,19 +147,21 @@ entry:
 }
 
 define <8 x half> @test_rot90_16x8(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
+; CHECK-LABEL: test_rot90_16x8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmla v0.8h, v1.8h, v2.8h, #90
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_rot90_16x8
-; CHECK: fcmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, #90
-;
   %res = tail call <8 x half> @llvm.aarch64.neon.vcmla.rot90.v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c)
   ret <8 x half> %res
 }
 
 define <8 x half> @test_rot90_16x8_lane_1(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
+; CHECK-LABEL: test_rot90_16x8_lane_1:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmla v0.8h, v1.8h, v2.h[1], #90
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_rot90_16x8_lane_1
-; CHECK: fcmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.h[1], #90
-;
   %c.cast = bitcast <8 x half> %c to <4 x i32>
   %c.dup = shufflevector <4 x i32> %c.cast , <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %c.res = bitcast <4 x i32> %c.dup to <8 x half>
@@ -151,19 +170,21 @@ entry:
 }
 
 define <8 x half> @test_rot180_16x8(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
+; CHECK-LABEL: test_rot180_16x8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmla v0.8h, v1.8h, v2.8h, #180
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_rot180_16x8
-; CHECK: fcmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, #180
-;
   %res = tail call <8 x half> @llvm.aarch64.neon.vcmla.rot180.v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c)
   ret <8 x half> %res
 }
 
 define <8 x half> @test_rot180_16x8_lane_1(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
+; CHECK-LABEL: test_rot180_16x8_lane_1:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmla v0.8h, v1.8h, v2.h[1], #180
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_rot180_16x8_lane_1
-; CHECK: fcmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.h[1], #180
-;
   %c.cast = bitcast <8 x half> %c to <4 x i32>
   %c.dup = shufflevector <4 x i32> %c.cast , <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %c.res = bitcast <4 x i32> %c.dup to <8 x half>
@@ -172,19 +193,21 @@ entry:
 }
 
 define <8 x half> @test_rot270_16x8(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
+; CHECK-LABEL: test_rot270_16x8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmla v0.8h, v1.8h, v2.8h, #270
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_rot270_16x8
-; CHECK: fcmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, #270
-;
   %res = tail call <8 x half> @llvm.aarch64.neon.vcmla.rot270.v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c)
   ret <8 x half> %res
 }
 
 define <8 x half> @test_rot270_16x8_lane_0(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
+; CHECK-LABEL: test_rot270_16x8_lane_0:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmla v0.8h, v1.8h, v2.h[0], #270
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_rot270_16x8_lane_0
-; CHECK: fcmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.h[0], #270
-;
   %c.cast = bitcast <8 x half> %c to <4 x i32>
   %c.dup = shufflevector <4 x i32> %c.cast , <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
   %c.res = bitcast <4 x i32> %c.dup to <8 x half>
@@ -193,19 +216,21 @@ entry:
 }
 
 define <4 x float> @test_32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+; CHECK-LABEL: test_32x4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmla v0.4s, v1.4s, v2.4s, #0
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_32x4
-; CHECK: fcmla v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, #0
-;
   %res = tail call <4 x float> @llvm.aarch64.neon.vcmla.rot0.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c)
   ret <4 x float> %res
 }
 
 define <4 x float> @test_32x4_lane_0(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+; CHECK-LABEL: test_32x4_lane_0:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmla v0.4s, v1.4s, v2.s[0], #0
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_32x4_lane_0
-; CHECK: fcmla v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.s[0], #0
-;
   %c.cast = bitcast <4 x float> %c to <2 x i64>
   %c.dup = shufflevector <2 x i64> %c.cast , <2 x i64> undef, <2 x i32> <i32 0, i32 0>
   %c.res = bitcast <2 x i64> %c.dup to <4 x float>
@@ -214,68 +239,170 @@ entry:
 }
 
 define <4 x float> @test_rot90_32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+; CHECK-LABEL: test_rot90_32x4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmla v0.4s, v1.4s, v2.4s, #90
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_rot90_32x4
-; CHECK: fcmla v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, #90
-;
   %res = tail call <4 x float> @llvm.aarch64.neon.vcmla.rot90.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c)
   ret <4 x float> %res
 }
 
 define <4 x float> @test_rot180_32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+; CHECK-LABEL: test_rot180_32x4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmla v0.4s, v1.4s, v2.4s, #180
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_rot180_32x4
-; CHECK: fcmla v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, #180
-;
   %res = tail call <4 x float> @llvm.aarch64.neon.vcmla.rot180.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c)
   ret <4 x float> %res
 }
 
 define <4 x float> @test_rot270_32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+; CHECK-LABEL: test_rot270_32x4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmla v0.4s, v1.4s, v2.4s, #270
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_rot270_32x4
-; CHECK: fcmla v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, #270
-;
   %res = tail call <4 x float> @llvm.aarch64.neon.vcmla.rot270.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c)
   ret <4 x float> %res
 }
 
 define <2 x double> @test_64x2(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: test_64x2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmla v0.2d, v1.2d, v2.2d, #0
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_64x2
-; CHECK: fcmla v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, #0
-;
   %res = tail call <2 x double> @llvm.aarch64.neon.vcmla.rot0.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c)
   ret <2 x double> %res
 }
 
 define <2 x double> @test_rot90_64x2(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: test_rot90_64x2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmla v0.2d, v1.2d, v2.2d, #90
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_rot90_64x2
-; CHECK: fcmla v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, #90
-;
   %res = tail call <2 x double> @llvm.aarch64.neon.vcmla.rot90.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c)
   ret <2 x double> %res
 }
 
 define <2 x double> @test_rot180_64x2(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: test_rot180_64x2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmla v0.2d, v1.2d, v2.2d, #180
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_rot180_64x2
-; CHECK: fcmla v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, #180
-;
   %res = tail call <2 x double> @llvm.aarch64.neon.vcmla.rot180.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c)
   ret <2 x double> %res
 }
 
 define <2 x double> @test_rot270_64x2(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: test_rot270_64x2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmla v0.2d, v1.2d, v2.2d, #270
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: test_rot270_64x2
-; CHECK: fcmla v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, #270
-;
   %res = tail call <2 x double> @llvm.aarch64.neon.vcmla.rot270.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c)
   ret <2 x double> %res
 }
 
+define <4 x float> @reassoc_f32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+; CHECK-LABEL: reassoc_f32x4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v3.2d, #0000000000000000
+; CHECK-NEXT:    fcmla v3.4s, v1.4s, v2.4s, #0
+; CHECK-NEXT:    fadd v0.4s, v3.4s, v0.4s
+; CHECK-NEXT:    ret
+entry:
+  %d = tail call <4 x float> @llvm.aarch64.neon.vcmla.rot0.v4f32(<4 x float> zeroinitializer, <4 x float> %b, <4 x float> %c)
+  %res = fadd fast <4 x float> %d, %a
+  ret <4 x float> %res
+}
+
+define <4 x float> @reassoc_c_f32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+; CHECK-LABEL: reassoc_c_f32x4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v3.2d, #0000000000000000
+; CHECK-NEXT:    fcmla v3.4s, v1.4s, v2.4s, #90
+; CHECK-NEXT:    fadd v0.4s, v0.4s, v3.4s
+; CHECK-NEXT:    ret
+entry:
+  %d = tail call <4 x float> @llvm.aarch64.neon.vcmla.rot90.v4f32(<4 x float> zeroinitializer, <4 x float> %b, <4 x float> %c)
+  %res = fadd fast <4 x float> %a, %d
+  ret <4 x float> %res
+}
+
+define <4 x half> @reassoc_f16x4(<4 x half> %a, <4 x half> %b, <4 x half> %c) {
+; CHECK-LABEL: reassoc_f16x4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi d3, #0000000000000000
+; CHECK-NEXT:    fcmla v3.4h, v1.4h, v2.4h, #180
+; CHECK-NEXT:    fadd v0.4h, v3.4h, v0.4h
+; CHECK-NEXT:    ret
+entry:
+  %d = tail call <4 x half> @llvm.aarch64.neon.vcmla.rot180.v4f16(<4 x half> zeroinitializer, <4 x half> %b, <4 x half> %c)
+  %res = fadd fast <4 x half> %d, %a
+  ret <4 x half> %res
+}
+
+define <4 x half> @reassoc_c_f16x4(<4 x half> %a, <4 x half> %b, <4 x half> %c) {
+; CHECK-LABEL: reassoc_c_f16x4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi d3, #0000000000000000
+; CHECK-NEXT:    fcmla v3.4h, v1.4h, v2.4h, #270
+; CHECK-NEXT:    fadd v0.4h, v0.4h, v3.4h
+; CHECK-NEXT:    ret
+entry:
+  %d = tail call <4 x half> @llvm.aarch64.neon.vcmla.rot270.v4f16(<4 x half> zeroinitializer, <4 x half> %b, <4 x half> %c)
+  %res = fadd fast <4 x half> %a, %d
+  ret <4 x half> %res
+}
+
+define <2 x double> @reassoc_f64x2(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %g) {
+; CHECK-LABEL: reassoc_f64x2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v4.2d, #0000000000000000
+; CHECK-NEXT:    fcmla v0.2d, v1.2d, v2.2d, #270
+; CHECK-NEXT:    fcmla v4.2d, v2.2d, v3.2d, #270
+; CHECK-NEXT:    fadd v0.2d, v4.2d, v0.2d
+; CHECK-NEXT:    ret
+entry:
+  %d = tail call <2 x double> @llvm.aarch64.neon.vcmla.rot270.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c)
+  %e = tail call <2 x double> @llvm.aarch64.neon.vcmla.rot270.v2f64(<2 x double> zeroinitializer, <2 x double> %c, <2 x double> %g)
+  %res = fadd fast <2 x double> %e, %d
+  ret <2 x double> %res
+}
+
+define <2 x double> @reassoc_c_f64x2(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %g) {
+; CHECK-LABEL: reassoc_c_f64x2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov v4.16b, v0.16b
+; CHECK-NEXT:    fcmla v0.2d, v2.2d, v3.2d, #270
+; CHECK-NEXT:    fcmla v4.2d, v1.2d, v2.2d, #270
+; CHECK-NEXT:    fadd v0.2d, v0.2d, v4.2d
+; CHECK-NEXT:    ret
+entry:
+  %d = tail call <2 x double> @llvm.aarch64.neon.vcmla.rot270.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c)
+  %e = tail call <2 x double> @llvm.aarch64.neon.vcmla.rot270.v2f64(<2 x double> %a, <2 x double> %c, <2 x double> %g)
+  %res = fadd fast <2 x double> %e, %d
+  ret <2 x double> %res
+}
+
+define <4 x float> @reassoc_nonfast_f32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+; CHECK-LABEL: reassoc_nonfast_f32x4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v3.2d, #0000000000000000
+; CHECK-NEXT:    fcmla v3.4s, v1.4s, v2.4s, #0
+; CHECK-NEXT:    fadd v0.4s, v3.4s, v0.4s
+; CHECK-NEXT:    ret
+entry:
+  %d = tail call <4 x float> @llvm.aarch64.neon.vcmla.rot0.v4f32(<4 x float> zeroinitializer, <4 x float> %b, <4 x float> %c)
+  %res = fadd <4 x float> %d, %a
+  ret <4 x float> %res
+}
+
 declare <4 x half> @llvm.aarch64.neon.vcmla.rot0.v4f16(<4 x half>, <4 x half>, <4 x half>)
 declare <4 x half> @llvm.aarch64.neon.vcmla.rot90.v4f16(<4 x half>, <4 x half>, <4 x half>)
 declare <4 x half> @llvm.aarch64.neon.vcmla.rot180.v4f16(<4 x half>, <4 x half>, <4 x half>)


More information about the llvm-commits mailing list