[llvm] 7876489 - [ARM] Add some tests for MVE vcmla combines. NFC

David Green via llvm-commits <llvm-commits at lists.llvm.org>
Tue Apr 4 09:45:56 PDT 2023


Author: David Green
Date: 2023-04-04T17:45:50+01:00
New Revision: 787648989e8ac380b392bd1047ac309475fdcb90

URL: https://github.com/llvm/llvm-project/commit/787648989e8ac380b392bd1047ac309475fdcb90
DIFF: https://github.com/llvm/llvm-project/commit/787648989e8ac380b392bd1047ac309475fdcb90.diff

LOG: [ARM] Add some tests for MVE vcmla combines. NFC

See D147200 and D147201
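
The combines in question (judging from the tests below) fold a fast-math
fadd into the accumulator operand of a vcmla: fadd(vcmla(0, b, c, rot), a)
and fadd(vcmul(b, c, rot), a) would each become vcmla(a, b, c, rot). The
CHECK lines in this NFC patch capture the current, pre-combine codegen, and
the *_nonfast variants check that nothing is folded without fast-math flags.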

Added: 
    llvm/test/CodeGen/Thumb2/mve-vcmla.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/Thumb2/mve-vcmla.ll b/llvm/test/CodeGen/Thumb2/mve-vcmla.ll
new file mode 100644
index 0000000000000..d0d65bdf836ba
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-vcmla.ll
@@ -0,0 +1,135 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve.fp -verify-machineinstrs -o - %s | FileCheck %s
+
+declare <8 x half> @llvm.arm.mve.vcmlaq.v8f16(i32, <8 x half>, <8 x half>, <8 x half>)
+declare <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32, <4 x float>, <4 x float>, <4 x float>)
+declare <8 x half> @llvm.arm.mve.vcmulq.v8f16(i32, <8 x half>, <8 x half>)
+declare <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32, <4 x float>, <4 x float>)
+
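+; The leading i32 argument of these intrinsics selects the rotation:
+; 0, 1, 2 and 3 correspond to the #0, #90, #180 and #270 instruction forms
+; (compare the constants below with the rotations in the CHECK lines).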
+
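+; With a fast fadd, a vcmla with a zeroinitializer accumulator followed by
+; an add could be combined into a single vcmla that uses the added value as
+; its accumulator. The CHECK lines record the codegen before that combine.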
+define arm_aapcs_vfpcc <4 x float> @reassoc_f32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+; CHECK-LABEL: reassoc_f32x4:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q3, #0x0
+; CHECK-NEXT:    vcmla.f32 q3, q1, q2, #0
+; CHECK-NEXT:    vadd.f32 q0, q3, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %d = tail call <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32 0, <4 x float> zeroinitializer, <4 x float> %b, <4 x float> %c)
+  %res = fadd fast <4 x float> %d, %a
+  ret <4 x float> %res
+}
+
+define arm_aapcs_vfpcc <4 x float> @reassoc_c_f32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+; CHECK-LABEL: reassoc_c_f32x4:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q3, #0x0
+; CHECK-NEXT:    vcmla.f32 q3, q1, q2, #90
+; CHECK-NEXT:    vadd.f32 q0, q0, q3
+; CHECK-NEXT:    bx lr
+entry:
+  %d = tail call <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32 1, <4 x float> zeroinitializer, <4 x float> %b, <4 x float> %c)
+  %res = fadd fast <4 x float> %a, %d
+  ret <4 x float> %res
+}
+
+define arm_aapcs_vfpcc <8 x half> @reassoc_f16x4(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
+; CHECK-LABEL: reassoc_f16x4:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q3, #0x0
+; CHECK-NEXT:    vcmla.f16 q3, q1, q2, #180
+; CHECK-NEXT:    vadd.f16 q0, q3, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %d = tail call <8 x half> @llvm.arm.mve.vcmlaq.v8f16(i32 2, <8 x half> zeroinitializer, <8 x half> %b, <8 x half> %c)
+  %res = fadd fast <8 x half> %d, %a
+  ret <8 x half> %res
+}
+
+define arm_aapcs_vfpcc <8 x half> @reassoc_c_f16x4(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
+; CHECK-LABEL: reassoc_c_f16x4:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q3, #0x0
+; CHECK-NEXT:    vcmla.f16 q3, q1, q2, #270
+; CHECK-NEXT:    vadd.f16 q0, q0, q3
+; CHECK-NEXT:    bx lr
+entry:
+  %d = tail call <8 x half> @llvm.arm.mve.vcmlaq.v8f16(i32 3, <8 x half> zeroinitializer, <8 x half> %b, <8 x half> %c)
+  %res = fadd fast <8 x half> %a, %d
+  ret <8 x half> %res
+}
+
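+; No fast-math flags on the fadd here, so the reassociation is not legal
+; and the separate vcmla and vadd must remain.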
+define arm_aapcs_vfpcc <4 x float> @reassoc_nonfast_f32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+; CHECK-LABEL: reassoc_nonfast_f32x4:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q3, #0x0
+; CHECK-NEXT:    vcmla.f32 q3, q1, q2, #0
+; CHECK-NEXT:    vadd.f32 q0, q3, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %d = tail call <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32 0, <4 x float> zeroinitializer, <4 x float> %b, <4 x float> %c)
+  %res = fadd <4 x float> %d, %a
+  ret <4 x float> %res
+}
+
+
+
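+; vcmul is effectively a vcmla with a zero accumulator, so with fast-math a
+; fadd of a vcmul result could likewise be folded into a vcmla that uses
+; the added value as its accumulator.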
+define arm_aapcs_vfpcc <4 x float> @muladd_f32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+; CHECK-LABEL: muladd_f32x4:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmul.f32 q3, q1, q2, #0
+; CHECK-NEXT:    vadd.f32 q0, q3, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %d = tail call <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32 0, <4 x float> %b, <4 x float> %c)
+  %res = fadd fast <4 x float> %d, %a
+  ret <4 x float> %res
+}
+
+define arm_aapcs_vfpcc <4 x float> @muladd_c_f32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+; CHECK-LABEL: muladd_c_f32x4:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmul.f32 q3, q1, q2, #90
+; CHECK-NEXT:    vadd.f32 q0, q0, q3
+; CHECK-NEXT:    bx lr
+entry:
+  %d = tail call <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32 1, <4 x float> %b, <4 x float> %c)
+  %res = fadd fast <4 x float> %a, %d
+  ret <4 x float> %res
+}
+
+define arm_aapcs_vfpcc <8 x half> @muladd_f16x4(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
+; CHECK-LABEL: muladd_f16x4:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmul.f16 q1, q1, q2, #180
+; CHECK-NEXT:    vadd.f16 q0, q1, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %d = tail call <8 x half> @llvm.arm.mve.vcmulq.v8f16(i32 2, <8 x half> %b, <8 x half> %c)
+  %res = fadd fast <8 x half> %d, %a
+  ret <8 x half> %res
+}
+
+define arm_aapcs_vfpcc <8 x half> @muladd_c_f16x4(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
+; CHECK-LABEL: muladd_c_f16x4:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmul.f16 q1, q1, q2, #270
+; CHECK-NEXT:    vadd.f16 q0, q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %d = tail call <8 x half> @llvm.arm.mve.vcmulq.v8f16(i32 3, <8 x half> %b, <8 x half> %c)
+  %res = fadd fast <8 x half> %a, %d
+  ret <8 x half> %res
+}
+
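+; As above, the fadd carries no fast-math flags, so no vcmla fold is
+; expected here.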
+define arm_aapcs_vfpcc <4 x float> @muladd_nonfast_f32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+; CHECK-LABEL: muladd_nonfast_f32x4:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmul.f32 q3, q1, q2, #0
+; CHECK-NEXT:    vadd.f32 q0, q3, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %d = tail call <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32 0, <4 x float> %b, <4 x float> %c)
+  %res = fadd <4 x float> %d, %a
+  ret <4 x float> %res
+}
