[llvm] 17ac26a - [AArch64][SVE] NFC: Add tests for masked FP arith patterns (D130564)

Cullen Rhodes via llvm-commits llvm-commits at lists.llvm.org
Mon Aug 8 01:44:55 PDT 2022


Author: Cullen Rhodes
Date: 2022-08-08T08:44:13Z
New Revision: 17ac26a78eaa1cbc8543c97f18d37c2e0c8bfad3

URL: https://github.com/llvm/llvm-project/commit/17ac26a78eaa1cbc8543c97f18d37c2e0c8bfad3
DIFF: https://github.com/llvm/llvm-project/commit/17ac26a78eaa1cbc8543c97f18d37c2e0c8bfad3.diff

LOG: [AArch64][SVE] NFC: Add tests for masked FP arith patterns (D130564)
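These tests cover FP add/sub where one operand is a select between a value (or an fmul result) and zero under a predicate. A minimal sketch of the pattern being exercised, taken from the half-precision case in the diff below:

    %sel = select <vscale x 8 x i1> %mask, <vscale x 8 x half> %b, <vscale x 8 x half> zeroinitializer
    %fadd = fadd nsz <vscale x 8 x half> %a, %sel

The CHECK lines record the current lowering: an unpredicated SEL against an explicit zero vector followed by an unpredicated FADD. Presumably the follow-up work in D130564 is intended to fold such patterns into a single predicated instruction, e.g. something like 'fadd z0.h, p0/m, z0.h, z1.h'.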

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/sve-fp-combine.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/sve-fp-combine.ll b/llvm/test/CodeGen/AArch64/sve-fp-combine.ll
index 9daa936e52c0f..98cb7b6f93659 100644
--- a/llvm/test/CodeGen/AArch64/sve-fp-combine.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fp-combine.ll
@@ -822,3 +822,159 @@ define <vscale x 2 x double> @fnmsb_d(<vscale x 2 x double> %m1, <vscale x 2 x d
   %res = fsub contract <vscale x 2 x double> %mul, %acc
   ret <vscale x 2 x double> %res
 }
+
+define <vscale x 8 x half> @fadd_h_sel(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: fadd_h_sel:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z2.h, #0 // =0x0
+; CHECK-NEXT:    sel z1.h, p0, z1.h, z2.h
+; CHECK-NEXT:    fadd z0.h, z0.h, z1.h
+; CHECK-NEXT:    ret
+  %sel = select <vscale x 8 x i1> %mask, <vscale x 8 x half> %b, <vscale x 8 x half> zeroinitializer
+  %fadd = fadd nsz <vscale x 8 x half> %a, %sel
+  ret <vscale x 8 x half> %fadd
+}
+
+define <vscale x 4 x float> @fadd_s_sel(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: fadd_s_sel:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z2.s, #0 // =0x0
+; CHECK-NEXT:    sel z1.s, p0, z1.s, z2.s
+; CHECK-NEXT:    fadd z0.s, z0.s, z1.s
+; CHECK-NEXT:    ret
+  %sel = select <vscale x 4 x i1> %mask, <vscale x 4 x float> %b, <vscale x 4 x float> zeroinitializer
+  %fadd = fadd nsz <vscale x 4 x float> %a, %sel
+  ret <vscale x 4 x float> %fadd
+}
+
+define <vscale x 2 x double> @fadd_d_sel(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: fadd_d_sel:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z2.d, #0 // =0x0
+; CHECK-NEXT:    sel z1.d, p0, z1.d, z2.d
+; CHECK-NEXT:    fadd z0.d, z0.d, z1.d
+; CHECK-NEXT:    ret
+  %sel = select <vscale x 2 x i1> %mask, <vscale x 2 x double> %b, <vscale x 2 x double> zeroinitializer
+  %fadd = fadd nsz <vscale x 2 x double> %a, %sel
+  ret <vscale x 2 x double> %fadd
+}
+
+define <vscale x 8 x half> @fsub_h_sel(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: fsub_h_sel:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z2.h, #0 // =0x0
+; CHECK-NEXT:    sel z1.h, p0, z1.h, z2.h
+; CHECK-NEXT:    fsub z0.h, z0.h, z1.h
+; CHECK-NEXT:    ret
+  %sel = select <vscale x 8 x i1> %mask, <vscale x 8 x half> %b, <vscale x 8 x half> zeroinitializer
+  %fsub = fsub <vscale x 8 x half> %a, %sel
+  ret <vscale x 8 x half> %fsub
+}
+
+define <vscale x 4 x float> @fsub_s_sel(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: fsub_s_sel:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z2.s, #0 // =0x0
+; CHECK-NEXT:    sel z1.s, p0, z1.s, z2.s
+; CHECK-NEXT:    fsub z0.s, z0.s, z1.s
+; CHECK-NEXT:    ret
+  %sel = select <vscale x 4 x i1> %mask, <vscale x 4 x float> %b, <vscale x 4 x float> zeroinitializer
+  %fsub = fsub <vscale x 4 x float> %a, %sel
+  ret <vscale x 4 x float> %fsub
+}
+
+define <vscale x 2 x double> @fsub_d_sel(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: fsub_d_sel:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z2.d, #0 // =0x0
+; CHECK-NEXT:    sel z1.d, p0, z1.d, z2.d
+; CHECK-NEXT:    fsub z0.d, z0.d, z1.d
+; CHECK-NEXT:    ret
+  %sel = select <vscale x 2 x i1> %mask, <vscale x 2 x double> %b, <vscale x 2 x double> zeroinitializer
+  %fsub = fsub <vscale x 2 x double> %a, %sel
+  ret <vscale x 2 x double> %fsub
+}
+
+define <vscale x 8 x half> @fadd_sel_fmul_h(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: fadd_sel_fmul_h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z3.h, #0 // =0x0
+; CHECK-NEXT:    fmul z1.h, z1.h, z2.h
+; CHECK-NEXT:    sel z1.h, p0, z1.h, z3.h
+; CHECK-NEXT:    fadd z0.h, z0.h, z1.h
+; CHECK-NEXT:    ret
+  %fmul = fmul <vscale x 8 x half> %b, %c
+  %sel = select <vscale x 8 x i1> %mask, <vscale x 8 x half> %fmul, <vscale x 8 x half> zeroinitializer
+  %fadd = fadd nsz contract <vscale x 8 x half> %a, %sel
+  ret <vscale x 8 x half> %fadd
+}
+
+define <vscale x 4 x float> @fadd_sel_fmul_s(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: fadd_sel_fmul_s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z3.s, #0 // =0x0
+; CHECK-NEXT:    fmul z1.s, z1.s, z2.s
+; CHECK-NEXT:    sel z1.s, p0, z1.s, z3.s
+; CHECK-NEXT:    fadd z0.s, z0.s, z1.s
+; CHECK-NEXT:    ret
+  %fmul = fmul <vscale x 4 x float> %b, %c
+  %sel = select <vscale x 4 x i1> %mask, <vscale x 4 x float> %fmul, <vscale x 4 x float> zeroinitializer
+  %fadd = fadd nsz contract <vscale x 4 x float> %a, %sel
+  ret <vscale x 4 x float> %fadd
+}
+
+define <vscale x 2 x double> @fadd_sel_fmul_d(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: fadd_sel_fmul_d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z3.d, #0 // =0x0
+; CHECK-NEXT:    fmul z1.d, z1.d, z2.d
+; CHECK-NEXT:    sel z1.d, p0, z1.d, z3.d
+; CHECK-NEXT:    fadd z0.d, z0.d, z1.d
+; CHECK-NEXT:    ret
+  %fmul = fmul <vscale x 2 x double> %b, %c
+  %sel = select <vscale x 2 x i1> %mask, <vscale x 2 x double> %fmul, <vscale x 2 x double> zeroinitializer
+  %fadd = fadd nsz contract <vscale x 2 x double> %a, %sel
+  ret <vscale x 2 x double> %fadd
+}
+
+define <vscale x 8 x half> @fsub_sel_fmul_h(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: fsub_sel_fmul_h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z3.h, #0 // =0x0
+; CHECK-NEXT:    fmul z1.h, z1.h, z2.h
+; CHECK-NEXT:    sel z1.h, p0, z1.h, z3.h
+; CHECK-NEXT:    fsub z0.h, z0.h, z1.h
+; CHECK-NEXT:    ret
+  %fmul = fmul <vscale x 8 x half> %b, %c
+  %sel = select <vscale x 8 x i1> %mask, <vscale x 8 x half> %fmul, <vscale x 8 x half> zeroinitializer
+  %fsub = fsub contract <vscale x 8 x half> %a, %sel
+  ret <vscale x 8 x half> %fsub
+}
+
+define <vscale x 4 x float> @fsub_sel_fmul_s(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: fsub_sel_fmul_s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z3.s, #0 // =0x0
+; CHECK-NEXT:    fmul z1.s, z1.s, z2.s
+; CHECK-NEXT:    sel z1.s, p0, z1.s, z3.s
+; CHECK-NEXT:    fsub z0.s, z0.s, z1.s
+; CHECK-NEXT:    ret
+  %fmul = fmul <vscale x 4 x float> %b, %c
+  %sel = select <vscale x 4 x i1> %mask, <vscale x 4 x float> %fmul, <vscale x 4 x float> zeroinitializer
+  %fsub = fsub contract <vscale x 4 x float> %a, %sel
+  ret <vscale x 4 x float> %fsub
+}
+
+define <vscale x 2 x double> @fsub_sel_fmul_d(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: fsub_sel_fmul_d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z3.d, #0 // =0x0
+; CHECK-NEXT:    fmul z1.d, z1.d, z2.d
+; CHECK-NEXT:    sel z1.d, p0, z1.d, z3.d
+; CHECK-NEXT:    fsub z0.d, z0.d, z1.d
+; CHECK-NEXT:    ret
+  %fmul = fmul <vscale x 2 x double> %b, %c
+  %sel = select <vscale x 2 x i1> %mask, <vscale x 2 x double> %fmul, <vscale x 2 x double> zeroinitializer
+  %fsub = fsub contract <vscale x 2 x double> %a, %sel
+  ret <vscale x 2 x double> %fsub
+}

More information about the llvm-commits mailing list