[llvm] ddfc8bf - ARM: Add baseline tests for fadd with select combine

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 19 07:28:11 PST 2022


Author: Matt Arsenault
Date: 2022-12-19T10:28:07-05:00
New Revision: ddfc8bfe07fa53f9b81dc6027ec7e94f8cb6c9ac

URL: https://github.com/llvm/llvm-project/commit/ddfc8bfe07fa53f9b81dc6027ec7e94f8cb6c9ac
DIFF: https://github.com/llvm/llvm-project/commit/ddfc8bfe07fa53f9b81dc6027ec7e94f8cb6c9ac.diff

LOG: ARM: Add baseline tests for fadd with select combine

Added: 
    llvm/test/CodeGen/ARM/fadd-select-fneg-combine.ll

Modified: 
    

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/ARM/fadd-select-fneg-combine.ll b/llvm/test/CodeGen/ARM/fadd-select-fneg-combine.ll
new file mode 100644
index 000000000000..957daa72b91d
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/fadd-select-fneg-combine.ll
@@ -0,0 +1,333 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=arm-- -mattr=+mve.fp < %s | FileCheck %s
+
+define float @fadd_select_fneg_fneg_f32(i32 %arg0, float %x, float %y, float %z) {
+; CHECK-LABEL: fadd_select_fneg_fneg_f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    eor r2, r2, #-2147483648
+; CHECK-NEXT:    eor r1, r1, #-2147483648
+; CHECK-NEXT:    vmov s0, r3
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    vmov s2, r2
+; CHECK-NEXT:    vmov s4, r1
+; CHECK-NEXT:    vseleq.f32 s2, s4, s2
+; CHECK-NEXT:    vadd.f32 s0, s2, s0
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    bx lr
+  ; Scalar f32: fadd(select(cmp, -x, -y), z). Baseline codegen negates both
+  ; inputs up front (eor of the sign bit 0x80000000) before the vseleq.
+  %cmp = icmp eq i32 %arg0, 0
+  %neg.x = fneg float %x
+  %neg.y  = fneg float %y
+  %select = select i1 %cmp, float %neg.x, float %neg.y
+  %add = fadd float %select, %z
+  ret float %add
+}
+
+define half @fadd_select_fneg_fneg_f16(i32 %arg0, half %x, half %y, half %z) {
+; CHECK-LABEL: fadd_select_fneg_fneg_f16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.f16 s0, r2
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    vmov.f16 s2, r1
+; CHECK-NEXT:    vneg.f16 s0, s0
+; CHECK-NEXT:    vneg.f16 s2, s2
+; CHECK-NEXT:    vseleq.f16 s0, s2, s0
+; CHECK-NEXT:    vmov.f16 s2, r3
+; CHECK-NEXT:    vadd.f16 s0, s0, s2
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    bx lr
+  ; Scalar f16 variant of the fneg/fneg select: baseline emits two vneg.f16
+  ; (one per select operand) ahead of the vseleq.
+  %cmp = icmp eq i32 %arg0, 0
+  %neg.x = fneg half %x
+  %neg.y = fneg half %y
+  %select = select i1 %cmp, half %neg.x, half %neg.y
+  %add = fadd half %select, %z
+  ret half %add
+}
+
+define <2 x float> @fadd_select_fneg_fneg_v2f32(i32 %arg0, <2 x float> %x, <2 x float> %y, <2 x float> %z) {
+; CHECK-LABEL: fadd_select_fneg_fneg_v2f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    add r1, sp, #24
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    beq .LBB2_2
+; CHECK-NEXT:  @ %bb.1: @ %select.false
+; CHECK-NEXT:    add r0, sp, #8
+; CHECK-NEXT:    vldrw.u32 q1, [r0]
+; CHECK-NEXT:    b .LBB2_3
+; CHECK-NEXT:  .LBB2_2:
+; CHECK-NEXT:    vmov d2, r2, r3
+; CHECK-NEXT:  .LBB2_3: @ %select.end
+; CHECK-NEXT:    vneg.f32 q1, q1
+; CHECK-NEXT:    vadd.f32 q0, q1, q0
+; CHECK-NEXT:    vmov r0, r1, d0
+; CHECK-NEXT:    vmov r2, r3, d1
+; CHECK-NEXT:    bx lr
+  ; <2 x float> case: the scalar-condition select is lowered to control flow
+  ; (select.false / select.end blocks), and a single vneg.f32 is applied to
+  ; the merged value after the select.
+  %cmp = icmp eq i32 %arg0, 0
+  %neg.x = fneg <2 x float> %x
+  %neg.y  = fneg <2 x float> %y
+  %select = select i1 %cmp, <2 x float> %neg.x, <2 x float> %neg.y
+  %add = fadd <2 x float> %select, %z
+  ret <2 x float> %add
+}
+
+define <2 x half> @fadd_select_fneg_fneg_v2f16(i32 %arg0, <2 x half> %x, <2 x half> %y, <2 x half> %z) {
+; CHECK-LABEL: fadd_select_fneg_fneg_v2f16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    add r1, sp, #24
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    beq .LBB3_2
+; CHECK-NEXT:  @ %bb.1: @ %select.false
+; CHECK-NEXT:    add r0, sp, #8
+; CHECK-NEXT:    vldrw.u32 q1, [r0]
+; CHECK-NEXT:    b .LBB3_3
+; CHECK-NEXT:  .LBB3_2:
+; CHECK-NEXT:    vmov d2, r2, r3
+; CHECK-NEXT:  .LBB3_3: @ %select.end
+; CHECK-NEXT:    vneg.f16 q1, q1
+; CHECK-NEXT:    vadd.f16 q0, q1, q0
+; CHECK-NEXT:    vmov r0, r1, d0
+; CHECK-NEXT:    vmov r2, r3, d1
+; CHECK-NEXT:    bx lr
+  ; <2 x half> counterpart of the v2f32 test above: branchy select lowering
+  ; with one vneg.f16 after the merge point.
+  %cmp = icmp eq i32 %arg0, 0
+  %neg.x = fneg <2 x half> %x
+  %neg.y = fneg <2 x half> %y
+  %select = select i1 %cmp, <2 x half> %neg.x, <2 x half> %neg.y
+  %add = fadd <2 x half> %select, %z
+  ret <2 x half> %add
+}
+
+define float @fadd_select_fsub_fsub_f32(i32 %arg0, float %x, float %y, float %z) {
+; CHECK-LABEL: fadd_select_fsub_fsub_f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.f32 s0, #2.000000e+00
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    vmov s4, r2
+; CHECK-NEXT:    vmov s2, r1
+; CHECK-NEXT:    vmov s6, r3
+; CHECK-NEXT:    vsub.f32 s4, s4, s0
+; CHECK-NEXT:    vsub.f32 s0, s2, s0
+; CHECK-NEXT:    vseleq.f32 s0, s0, s4
+; CHECK-NEXT:    vadd.f32 s0, s0, s6
+; CHECK-NEXT:    vmov r0, s6
+; CHECK-NEXT:    bx lr
+  ; Both select operands are nsz fsub-by-2.0; baseline performs both vsub.f32
+  ; before the vseleq rather than selecting first and subtracting once.
+  %cmp = icmp eq i32 %arg0, 0
+  %neg.x = fsub nsz float %x, 2.0
+  %neg.y  = fsub nsz float %y, 2.0
+  %select = select i1 %cmp, float %neg.x, float %neg.y
+  %add = fadd float %select, %z
+  ret float %add
+}
+
+define float @fneg_select_fmul_fmul_f32(i32 %arg0, float %x, float %y) {
+; CHECK-LABEL: fneg_select_fmul_fmul_f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.f32 s0, #4.000000e+00
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    vmov.f32 s2, #8.000000e+00
+; CHECK-NEXT:    vmov s4, r2
+; CHECK-NEXT:    vmov s6, r1
+; CHECK-NEXT:    vmul.f32 s0, s4, s0
+; CHECK-NEXT:    vmul.f32 s2, s6, s2
+; CHECK-NEXT:    vseleq.f32 s0, s2, s0
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    eor r0, r0, #-2147483648
+; CHECK-NEXT:    bx lr
+  ; fneg applied to a select of two fmuls (x*8 vs y*4); the final negation is
+  ; done as an integer eor of the sign bit on the moved-out result.
+  %cmp = icmp eq i32 %arg0, 0
+  %neg.x = fmul float %x, 8.0
+  %neg.y  = fmul float %y, 4.0
+  %select = select i1 %cmp, float %neg.x, float %neg.y
+  %neg = fneg float %select
+  ret float %neg
+}
+
+define half @fadd_select_fsub_fsub_f16(i32 %arg0, half %x, half %y, half %z) {
+; CHECK-LABEL: fadd_select_fsub_fsub_f16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.f16 s0, r2
+; CHECK-NEXT:    vmov.f16 s2, #2.000000e+00
+; CHECK-NEXT:    vmov.f16 s4, r1
+; CHECK-NEXT:    vsub.f16 s0, s0, s2
+; CHECK-NEXT:    vsub.f16 s2, s4, s2
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    vseleq.f16 s0, s2, s0
+; CHECK-NEXT:    vmov.f16 s2, r3
+; CHECK-NEXT:    vadd.f16 s0, s0, s2
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    bx lr
+  ; f16 variant of fsub/fsub select: both vsub.f16 by 2.0 happen before the
+  ; vseleq, then the selected value feeds the vadd with z.
+  %cmp = icmp eq i32 %arg0, 0
+  %sub.x = fsub nsz half %x, 2.0
+  %sub.y  = fsub nsz half %y, 2.0
+  %select = select i1 %cmp, half %sub.x, half %sub.y
+  %add = fadd half %select, %z
+  ret half %add
+}
+
+define half @fneg_select_fmul_fmul_f16(i32 %arg0, half %x, half %y) {
+; CHECK-LABEL: fneg_select_fmul_fmul_f16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.f16 s0, r2
+; CHECK-NEXT:    vmov.f16 s2, #4.000000e+00
+; CHECK-NEXT:    vmul.f16 s0, s0, s2
+; CHECK-NEXT:    vmov.f16 s2, r1
+; CHECK-NEXT:    vmov.f16 s4, #8.000000e+00
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    vmul.f16 s2, s2, s4
+; CHECK-NEXT:    vseleq.f16 s0, s2, s0
+; CHECK-NEXT:    vneg.f16 s0, s0
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    bx lr
+  ; f16 variant of fneg(select(fmul, fmul)): here the final negation is a
+  ; vneg.f16 on the selected value rather than an integer sign-bit eor.
+  %cmp = icmp eq i32 %arg0, 0
+  %mul.x = fmul half %x, 8.0
+  %mul.y  = fmul half %y, 4.0
+  %select = select i1 %cmp, half %mul.x, half %mul.y
+  %neg = fneg half %select
+  ret half %neg
+}
+
+define half @fadd_select_fsub_arg_f16(i32 %arg0, half %x, half %y, half %z) {
+; CHECK-LABEL: fadd_select_fsub_arg_f16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.f16 s0, r1
+; CHECK-NEXT:    vmov.f16 s2, #-2.000000e+00
+; CHECK-NEXT:    vadd.f16 s0, s0, s2
+; CHECK-NEXT:    vmov.f16 s2, r2
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    vseleq.f16 s0, s0, s2
+; CHECK-NEXT:    vmov.f16 s2, r3
+; CHECK-NEXT:    vadd.f16 s0, s0, s2
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    bx lr
+  ; Only the true arm of the select is an fsub (x - 2.0); note the fsub is
+  ; emitted as vadd with the constant -2.0.
+  %cmp = icmp eq i32 %arg0, 0
+  %sub.x = fsub nsz half %x, 2.0
+  %select = select i1 %cmp, half %sub.x, half %y
+  %add = fadd half %select, %z
+  ret half %add
+}
+
+define half @fadd_select_arg_fsub_f16(i32 %arg0, half %x, half %y, half %z) {
+; CHECK-LABEL: fadd_select_arg_fsub_f16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.f16 s0, r2
+; CHECK-NEXT:    vmov.f16 s2, #-2.000000e+00
+; CHECK-NEXT:    vadd.f16 s0, s0, s2
+; CHECK-NEXT:    vmov.f16 s2, r1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    vseleq.f16 s0, s2, s0
+; CHECK-NEXT:    vmov.f16 s2, r3
+; CHECK-NEXT:    vadd.f16 s0, s0, s2
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    bx lr
+  ; Mirror of fadd_select_fsub_arg_f16: only the false arm of the select is
+  ; an fsub (y - 2.0), again emitted as vadd with -2.0.
+  %cmp = icmp eq i32 %arg0, 0
+  %sub.y = fsub nsz half %y, 2.0
+  %select = select i1 %cmp, half %x, half %sub.y
+  %add = fadd half %select, %z
+  ret half %add
+}
+
+define half @fadd_select_fsub_select_f16(i32 %arg0, half %x, half %y, half %z) {
+; CHECK-LABEL: fadd_select_fsub_select_f16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.f16 s0, r1
+; CHECK-NEXT:    vmov.f16 s2, #2.000000e+00
+; CHECK-NEXT:    vsub.f16 s0, s0, s2
+; CHECK-NEXT:    vmov.f16 s4, r2
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    vsub.f16 s2, s4, s2
+; CHECK-NEXT:    vseleq.f16 s0, s0, s4
+; CHECK-NEXT:    vseleq.f16 s0, s2, s0
+; CHECK-NEXT:    vmov.f16 s2, r3
+; CHECK-NEXT:    vadd.f16 s0, s0, s2
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    bx lr
+  ; Chained selects on the same condition: select1's false arm is itself a
+  ; select (select0), so two vseleq.f16 are emitted back to back.
+  %cmp = icmp eq i32 %arg0, 0
+  %sub.x = fsub nsz half %x, 2.0
+  %sub.y = fsub nsz half %y, 2.0
+  %select0 = select i1 %cmp, half %sub.x, half %y
+  %select1 = select i1 %cmp, half %sub.y, half %select0
+  %add = fadd half %select1, %z
+  ret half %add
+}
+
+define half @fadd_select_fneg_negk_f16(i32 %arg0, half %x, half %y) {
+; CHECK-LABEL: fadd_select_fneg_negk_f16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.f16 s0, r1
+; CHECK-NEXT:    vmov.f16 s2, #-4.000000e+00
+; CHECK-NEXT:    vneg.f16 s0, s0
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    vseleq.f16 s0, s0, s2
+; CHECK-NEXT:    vmov.f16 s2, r2
+; CHECK-NEXT:    vadd.f16 s0, s0, s2
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    bx lr
+  ; Select between fneg(x) and the negative constant -4.0; the constant is
+  ; materialized directly with vmov.f16.
+  %cmp = icmp eq i32 %arg0, 0
+  %neg.x = fneg half %x
+  %select = select i1 %cmp, half %neg.x, half -4.0
+  %add = fadd half %select, %y
+  ret half %add
+}
+
+define half @fadd_select_fneg_posk_f16(i32 %arg0, half %x, half %y) {
+; CHECK-LABEL: fadd_select_fneg_posk_f16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.f16 s0, r1
+; CHECK-NEXT:    vmov.f16 s2, #4.000000e+00
+; CHECK-NEXT:    vneg.f16 s0, s0
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    vseleq.f16 s0, s0, s2
+; CHECK-NEXT:    vmov.f16 s2, r2
+; CHECK-NEXT:    vadd.f16 s0, s0, s2
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    bx lr
+  ; Same as fadd_select_fneg_negk_f16 but with a positive constant arm (4.0).
+  %cmp = icmp eq i32 %arg0, 0
+  %neg.x = fneg half %x
+  %select = select i1 %cmp, half %neg.x, half 4.0
+  %add = fadd half %select, %y
+  ret half %add
+}
+
+define <8 x half> @fadd_vselect_fneg_posk_v8f16(<8 x i32> %arg0, <8 x half> %x, <8 x half> %y) {
+; CHECK-LABEL: fadd_vselect_fneg_posk_v8f16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    push {r4, r5, r11, lr}
+; CHECK-NEXT:    vmov d0, r0, r1
+; CHECK-NEXT:    add r0, sp, #16
+; CHECK-NEXT:    vmov d1, r2, r3
+; CHECK-NEXT:    vldrw.u32 q3, [r0]
+; CHECK-NEXT:    vcmp.i32 eq, q0, zr
+; CHECK-NEXT:    vmov.i8 q0, #0x0
+; CHECK-NEXT:    vmov.i8 q1, #0xff
+; CHECK-NEXT:    add r0, sp, #32
+; CHECK-NEXT:    vpsel q2, q1, q0
+; CHECK-NEXT:    vcmp.i32 eq, q3, zr
+; CHECK-NEXT:    vpsel q1, q1, q0
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    vmov r1, r0, d4
+; CHECK-NEXT:    vmov r4, r5, d5
+; CHECK-NEXT:    vmov.16 q2[0], r1
+; CHECK-NEXT:    vmov.16 q2[1], r0
+; CHECK-NEXT:    vmov r2, r3, d2
+; CHECK-NEXT:    vmov.16 q2[2], r4
+; CHECK-NEXT:    vmov lr, r12, d3
+; CHECK-NEXT:    vmov.16 q2[3], r5
+; CHECK-NEXT:    vneg.f16 q0, q0
+; CHECK-NEXT:    vmov.16 q2[4], r2
+; CHECK-NEXT:    vmov.i16 q1, #0x4400
+; CHECK-NEXT:    vmov.16 q2[5], r3
+; CHECK-NEXT:    add r0, sp, #48
+; CHECK-NEXT:    vmov.16 q2[6], lr
+; CHECK-NEXT:    vmov.16 q2[7], r12
+; CHECK-NEXT:    vcmp.i16 ne, q2, zr
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vldrw.u32 q1, [r0]
+; CHECK-NEXT:    vadd.f16 q0, q0, q1
+; CHECK-NEXT:    vmov r0, r1, d0
+; CHECK-NEXT:    vmov r2, r3, d1
+; CHECK-NEXT:    pop {r4, r5, r11, pc}
+  ; Vector-condition variant: the <8 x i1> mask comes from two i32 lane
+  ; compares, which baseline rebuilds into an i16 predicate via per-lane
+  ; vmov.16 inserts before the vpsel of fneg(x) against the 4.0 splat
+  ; (0x4400 is the f16 bit pattern for 4.0).
+  %cmp = icmp eq <8 x i32> %arg0, zeroinitializer
+  %neg.x = fneg <8 x half> %x
+  %select = select <8 x i1> %cmp, <8 x half> %neg.x, <8 x half> <half 4.0, half 4.0, half 4.0, half 4.0, half 4.0, half 4.0, half 4.0, half 4.0>
+  %add = fadd <8 x half> %select, %y
+  ret <8 x half> %add
+}


        


More information about the llvm-commits mailing list