[llvm] e13582e - [CodeGen] Precommit tests for D153355

Igor Kirillov via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 4 02:30:09 PDT 2023


Author: Igor Kirillov
Date: 2023-07-04T09:29:38Z
New Revision: e13582e9e34511e9934b3674007e43f934b9de25

URL: https://github.com/llvm/llvm-project/commit/e13582e9e34511e9934b3674007e43f934b9de25
DIFF: https://github.com/llvm/llvm-project/commit/e13582e9e34511e9934b3674007e43f934b9de25.diff

LOG: [CodeGen] Precommit tests for D153355

Differential Revision: https://reviews.llvm.org/D153856
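
The two new tests cover complex multiplication where the second factor is a splat: a compile-time complex constant (11.0 + 3.0i) in one case and a loop-invariant complex scalar passed as [2 x double] in the other. As a rough sketch, the IR corresponds to a C-level pattern along these lines (the function names, pointer types, and fast-math assumption are illustrative, not taken from the commit):

#include <complex.h>

/* Hypothetical source pattern; the committed tests are reduced LLVM IR,
   so everything here beyond the arithmetic itself is an assumption. */
void mul_by_const(double complex *restrict d, const double complex *restrict a,
                  const double complex *restrict b, long n) {
  for (long i = 0; i < n; ++i)
    d[i] = a[i] * b[i] * (11.0 + 3.0 * I);   /* splat of a complex constant */
}

void mul_by_scalar(double complex *restrict d, const double complex *restrict a,
                   const double complex *restrict b, double complex c, long n) {
  for (long i = 0; i < n; ++i)
    d[i] = a[i] * b[i] * c;                  /* splat of a loop-invariant value */
}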

Added: 
    llvm/test/CodeGen/AArch64/complex-deinterleaving-splat-scalable.ll
    llvm/test/CodeGen/AArch64/complex-deinterleaving-splat.ll

Modified: 
    

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-splat-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-splat-scalable.ll
new file mode 100644
index 00000000000000..b15fbd2e563289
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-splat-scalable.ll
@@ -0,0 +1,107 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s --mattr=+sve -o - | FileCheck %s
+
+target triple = "aarch64-arm-none-eabi"
+
+; a[i] * b[i] * (11.0 + 3.0.i);
+;
+define <vscale x 4 x double> @complex_mul_const(<vscale x 4 x double> %a, <vscale x 4 x double> %b) {
+; CHECK-LABEL: complex_mul_const:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    uzp1 z4.d, z2.d, z3.d
+; CHECK-NEXT:    uzp1 z5.d, z0.d, z1.d
+; CHECK-NEXT:    uzp2 z0.d, z0.d, z1.d
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    uzp2 z1.d, z2.d, z3.d
+; CHECK-NEXT:    fmul z2.d, z4.d, z0.d
+; CHECK-NEXT:    fmla z2.d, p0/m, z1.d, z5.d
+; CHECK-NEXT:    fmul z0.d, z1.d, z0.d
+; CHECK-NEXT:    fmov z1.d, #11.00000000
+; CHECK-NEXT:    fnmls z0.d, p0/m, z4.d, z5.d
+; CHECK-NEXT:    fmov z3.d, #3.00000000
+; CHECK-NEXT:    fmul z4.d, z2.d, z1.d
+; CHECK-NEXT:    fmul z2.d, z2.d, z3.d
+; CHECK-NEXT:    fmla z4.d, p0/m, z0.d, z3.d
+; CHECK-NEXT:    fnmsb z1.d, p0/m, z0.d, z2.d
+; CHECK-NEXT:    zip1 z0.d, z1.d, z4.d
+; CHECK-NEXT:    zip2 z1.d, z1.d, z4.d
+; CHECK-NEXT:    ret
+entry:
+  %strided.vec = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %a)
+  %0 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 0
+  %1 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 1
+  %strided.vec48 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %b)
+  %2 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec48, 0
+  %3 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec48, 1
+  %4 = fmul fast <vscale x 2 x double> %3, %0
+  %5 = fmul fast <vscale x 2 x double> %2, %1
+  %6 = fadd fast <vscale x 2 x double> %4, %5
+  %7 = fmul fast <vscale x 2 x double> %2, %0
+  %8 = fmul fast <vscale x 2 x double> %3, %1
+  %9 = fsub fast <vscale x 2 x double> %7, %8
+  %10 = fmul fast <vscale x 2 x double> %9, shufflevector (<vscale x 2 x double> insertelement (<vscale x 2 x double> poison, double 3.000000e+00, i64 0), <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer)
+  %11 = fmul fast <vscale x 2 x double> %6, shufflevector (<vscale x 2 x double> insertelement (<vscale x 2 x double> poison, double 1.100000e+01, i64 0), <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer)
+  %12 = fadd fast <vscale x 2 x double> %10, %11
+  %13 = fmul fast <vscale x 2 x double> %9, shufflevector (<vscale x 2 x double> insertelement (<vscale x 2 x double> poison, double 1.100000e+01, i64 0), <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer)
+  %14 = fmul fast <vscale x 2 x double> %6, shufflevector (<vscale x 2 x double> insertelement (<vscale x 2 x double> poison, double 3.000000e+00, i64 0), <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer)
+  %15 = fsub fast <vscale x 2 x double> %13, %14
+  %interleaved.vec = tail call <vscale x 4 x double> @llvm.experimental.vector.interleave2.nxv4f64(<vscale x 2 x double> %15, <vscale x 2 x double> %12)
+  ret <vscale x 4 x double> %interleaved.vec
+}
+
+; a[i] * b[i] * c;
+;
+define <vscale x 4 x double> @complex_mul_non_const(<vscale x 4 x double> %a, <vscale x 4 x double> %b, [2 x double] %c) {
+; CHECK-LABEL: complex_mul_non_const:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    uzp1 z6.d, z2.d, z3.d
+; CHECK-NEXT:    uzp1 z7.d, z0.d, z1.d
+; CHECK-NEXT:    uzp2 z0.d, z0.d, z1.d
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    uzp2 z1.d, z2.d, z3.d
+; CHECK-NEXT:    fmul z2.d, z6.d, z0.d
+; CHECK-NEXT:    fmla z2.d, p0/m, z1.d, z7.d
+; CHECK-NEXT:    // kill: def $d4 killed $d4 def $z4
+; CHECK-NEXT:    fmul z0.d, z1.d, z0.d
+; CHECK-NEXT:    mov z4.d, d4
+; CHECK-NEXT:    // kill: def $d5 killed $d5 def $z5
+; CHECK-NEXT:    mov z3.d, d5
+; CHECK-NEXT:    fnmls z0.d, p0/m, z6.d, z7.d
+; CHECK-NEXT:    fmul z1.d, z2.d, z4.d
+; CHECK-NEXT:    fmul z2.d, z2.d, z3.d
+; CHECK-NEXT:    fmla z1.d, p0/m, z0.d, z3.d
+; CHECK-NEXT:    fnmls z2.d, p0/m, z0.d, z4.d
+; CHECK-NEXT:    zip1 z0.d, z2.d, z1.d
+; CHECK-NEXT:    zip2 z1.d, z2.d, z1.d
+; CHECK-NEXT:    ret
+entry:
+  %c.coerce.fca.0.extract = extractvalue [2 x double] %c, 0
+  %c.coerce.fca.1.extract = extractvalue [2 x double] %c, 1
+  %broadcast.splatinsert = insertelement <vscale x 2 x double> poison, double %c.coerce.fca.1.extract, i64 0
+  %broadcast.splat = shufflevector <vscale x 2 x double> %broadcast.splatinsert, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
+  %broadcast.splatinsert49 = insertelement <vscale x 2 x double> poison, double %c.coerce.fca.0.extract, i64 0
+  %broadcast.splat50 = shufflevector <vscale x 2 x double> %broadcast.splatinsert49, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
+  %strided.vec = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %a)
+  %0 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 0
+  %1 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 1
+  %strided.vec48 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %b)
+  %2 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec48, 0
+  %3 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec48, 1
+  %4 = fmul fast <vscale x 2 x double> %3, %0
+  %5 = fmul fast <vscale x 2 x double> %2, %1
+  %6 = fadd fast <vscale x 2 x double> %4, %5
+  %7 = fmul fast <vscale x 2 x double> %2, %0
+  %8 = fmul fast <vscale x 2 x double> %3, %1
+  %9 = fsub fast <vscale x 2 x double> %7, %8
+  %10 = fmul fast <vscale x 2 x double> %9, %broadcast.splat
+  %11 = fmul fast <vscale x 2 x double> %6, %broadcast.splat50
+  %12 = fadd fast <vscale x 2 x double> %10, %11
+  %13 = fmul fast <vscale x 2 x double> %9, %broadcast.splat50
+  %14 = fmul fast <vscale x 2 x double> %6, %broadcast.splat
+  %15 = fsub fast <vscale x 2 x double> %13, %14
+  %interleaved.vec = tail call <vscale x 4 x double> @llvm.experimental.vector.interleave2.nxv4f64(<vscale x 2 x double> %15, <vscale x 2 x double> %12)
+  ret <vscale x 4 x double> %interleaved.vec
+}
+
+declare { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double>)
+declare <vscale x 4 x double> @llvm.experimental.vector.interleave2.nxv4f64(<vscale x 2 x double>, <vscale x 2 x double>)
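
For reference, the IR above is the fully scalarized form of the computation named in the comment. Writing the deinterleaved product of a and b as p + q*i (p is %9 and q is %6 in complex_mul_const), the final fmul/fadd/fsub group computes

  (p + q*i) * (11 + 3*i) = (11*p - 3*q) + (3*p + 11*q)*i

so %15 carries the real lanes and %12 the imaginary lanes before they are re-interleaved. The CHECK lines show this currently lowering to plain fmul/fmla/fnmls sequences on SVE, presumably the baseline that D153355 is intended to improve.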

diff  --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-splat.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-splat.ll
new file mode 100644
index 00000000000000..0123406f92113d
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-splat.ll
@@ -0,0 +1,102 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s --mattr=+complxnum -o - | FileCheck %s
+
+target triple = "aarch64-arm-none-eabi"
+
+
+; a[i] * b[i] * (11.0 + 3.0.i);
+;
+define <4 x double> @complex_mul_const(<4 x double> %a, <4 x double> %b) {
+; CHECK-LABEL: complex_mul_const:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    zip1 v5.2d, v2.2d, v3.2d
+; CHECK-NEXT:    zip2 v6.2d, v0.2d, v1.2d
+; CHECK-NEXT:    zip2 v2.2d, v2.2d, v3.2d
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
+; CHECK-NEXT:    fmov v4.2d, #3.00000000
+; CHECK-NEXT:    fmul v1.2d, v5.2d, v6.2d
+; CHECK-NEXT:    fmul v3.2d, v2.2d, v6.2d
+; CHECK-NEXT:    fmla v1.2d, v0.2d, v2.2d
+; CHECK-NEXT:    fneg v2.2d, v3.2d
+; CHECK-NEXT:    fmov v3.2d, #11.00000000
+; CHECK-NEXT:    fmul v6.2d, v1.2d, v4.2d
+; CHECK-NEXT:    fmla v2.2d, v0.2d, v5.2d
+; CHECK-NEXT:    fmul v1.2d, v1.2d, v3.2d
+; CHECK-NEXT:    fneg v5.2d, v6.2d
+; CHECK-NEXT:    fmla v1.2d, v4.2d, v2.2d
+; CHECK-NEXT:    fmla v5.2d, v3.2d, v2.2d
+; CHECK-NEXT:    zip1 v0.2d, v5.2d, v1.2d
+; CHECK-NEXT:    zip2 v1.2d, v5.2d, v1.2d
+; CHECK-NEXT:    ret
+entry:
+  %strided.vec = shufflevector <4 x double> %a, <4 x double> poison, <2 x i32> <i32 0, i32 2>
+  %strided.vec47 = shufflevector <4 x double> %a, <4 x double> poison, <2 x i32> <i32 1, i32 3>
+  %strided.vec49 = shufflevector <4 x double> %b, <4 x double> poison, <2 x i32> <i32 0, i32 2>
+  %strided.vec50 = shufflevector <4 x double> %b, <4 x double> poison, <2 x i32> <i32 1, i32 3>
+  %0 = fmul fast <2 x double> %strided.vec50, %strided.vec
+  %1 = fmul fast <2 x double> %strided.vec49, %strided.vec47
+  %2 = fadd fast <2 x double> %0, %1
+  %3 = fmul fast <2 x double> %strided.vec49, %strided.vec
+  %4 = fmul fast <2 x double> %strided.vec50, %strided.vec47
+  %5 = fsub fast <2 x double> %3, %4
+  %6 = fmul fast <2 x double> %5, <double 3.000000e+00, double 3.000000e+00>
+  %7 = fmul fast <2 x double> %2, <double 1.100000e+01, double 1.100000e+01>
+  %8 = fadd fast <2 x double> %6, %7
+  %9 = fmul fast <2 x double> %5, <double 1.100000e+01, double 1.100000e+01>
+  %10 = fmul fast <2 x double> %2, <double 3.000000e+00, double 3.000000e+00>
+  %11 = fsub fast <2 x double> %9, %10
+  %interleaved.vec = shufflevector <2 x double> %11, <2 x double> %8, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+  ret <4 x double> %interleaved.vec
+}
+
+
+; a[i] * b[i] * c;
+;
+define <4 x double> @complex_mul_non_const(<4 x double> %a, <4 x double> %b, [2 x double] %c) {
+; CHECK-LABEL: complex_mul_non_const:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    zip1 v6.2d, v2.2d, v3.2d
+; CHECK-NEXT:    // kill: def $d5 killed $d5 def $q5
+; CHECK-NEXT:    // kill: def $d4 killed $d4 def $q4
+; CHECK-NEXT:    zip2 v7.2d, v0.2d, v1.2d
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
+; CHECK-NEXT:    zip2 v1.2d, v2.2d, v3.2d
+; CHECK-NEXT:    fmul v2.2d, v6.2d, v7.2d
+; CHECK-NEXT:    fmul v3.2d, v1.2d, v7.2d
+; CHECK-NEXT:    fmla v2.2d, v0.2d, v1.2d
+; CHECK-NEXT:    fneg v1.2d, v3.2d
+; CHECK-NEXT:    fmul v3.2d, v2.2d, v5.d[0]
+; CHECK-NEXT:    fmul v2.2d, v2.2d, v4.d[0]
+; CHECK-NEXT:    fmla v1.2d, v0.2d, v6.2d
+; CHECK-NEXT:    fneg v3.2d, v3.2d
+; CHECK-NEXT:    fmla v2.2d, v1.2d, v5.d[0]
+; CHECK-NEXT:    fmla v3.2d, v1.2d, v4.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v3.2d, v2.2d
+; CHECK-NEXT:    zip2 v1.2d, v3.2d, v2.2d
+; CHECK-NEXT:    ret
+entry:
+  %c.coerce.fca.1.extract = extractvalue [2 x double] %c, 1
+  %c.coerce.fca.0.extract = extractvalue [2 x double] %c, 0
+  %broadcast.splatinsert = insertelement <2 x double> poison, double %c.coerce.fca.1.extract, i64 0
+  %broadcast.splat = shufflevector <2 x double> %broadcast.splatinsert, <2 x double> poison, <2 x i32> zeroinitializer
+  %broadcast.splatinsert51 = insertelement <2 x double> poison, double %c.coerce.fca.0.extract, i64 0
+  %broadcast.splat52 = shufflevector <2 x double> %broadcast.splatinsert51, <2 x double> poison, <2 x i32> zeroinitializer
+  %strided.vec = shufflevector <4 x double> %a, <4 x double> poison, <2 x i32> <i32 0, i32 2>
+  %strided.vec47 = shufflevector <4 x double> %a, <4 x double> poison, <2 x i32> <i32 1, i32 3>
+  %strided.vec49 = shufflevector <4 x double> %b, <4 x double> poison, <2 x i32> <i32 0, i32 2>
+  %strided.vec50 = shufflevector <4 x double> %b, <4 x double> poison, <2 x i32> <i32 1, i32 3>
+  %0 = fmul fast <2 x double> %strided.vec50, %strided.vec
+  %1 = fmul fast <2 x double> %strided.vec49, %strided.vec47
+  %2 = fadd fast <2 x double> %0, %1
+  %3 = fmul fast <2 x double> %strided.vec49, %strided.vec
+  %4 = fmul fast <2 x double> %strided.vec50, %strided.vec47
+  %5 = fsub fast <2 x double> %3, %4
+  %6 = fmul fast <2 x double> %5, %broadcast.splat
+  %7 = fmul fast <2 x double> %2, %broadcast.splat52
+  %8 = fadd fast <2 x double> %6, %7
+  %9 = fmul fast <2 x double> %5, %broadcast.splat52
+  %10 = fmul fast <2 x double> %2, %broadcast.splat
+  %11 = fsub fast <2 x double> %9, %10
+  %interleaved.vec = shufflevector <2 x double> %11, <2 x double> %8, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+  ret <4 x double> %interleaved.vec
+}
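
As the NOTE lines say, the CHECK assertions in both files are autogenerated. After a codegen change they can be refreshed with something like the following, assuming an in-tree checkout and a freshly built llc on PATH (the exact invocation depends on the local setup):

  python3 llvm/utils/update_llc_test_checks.py \
      llvm/test/CodeGen/AArch64/complex-deinterleaving-splat-scalable.ll \
      llvm/test/CodeGen/AArch64/complex-deinterleaving-splat.ll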
