[llvm] r332872 - [X86] Add test cases for D47012.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Mon May 21 12:33:42 PDT 2018
Author: ctopper
Date: Mon May 21 12:33:42 2018
New Revision: 332872
URL: http://llvm.org/viewvc/llvm-project?rev=332872&view=rev
Log:
[X86] Add test cases for D47012.
Patch by Tomasz Krupa.
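
For context, the patterns in combine-select.ll below match the IR that Clang
typically emits for the AVX-512 masked scalar add/sub/mul intrinsics, committed
here ahead of D47012 so that patch's codegen changes show up as a test diff.
A minimal C-level sketch of the first two test functions (function names are
illustrative, not from the commit; assumes AVX-512F, e.g. -mavx512f):

    #include <immintrin.h>

    /* select_mask_add_ss: lane 0 becomes a[0] + b[0] when bit 0 of u is
       set, otherwise w[0]; lanes 1..3 are passed through from a. */
    __m128 mask_add_ss(__m128 w, __mmask8 u, __m128 a, __m128 b) {
      return _mm_mask_add_ss(w, u, a, b);
    }

    /* select_maskz_add_ss: zero-masking variant; lane 0 is zeroed when
       bit 0 of u is clear. */
    __m128 maskz_add_ss(__mmask8 u, __m128 a, __m128 b) {
      return _mm_maskz_add_ss(u, a, b);
    }

The sub and mul tests follow the same shape with _mm_mask_sub_ss,
_mm_maskz_sub_ss, _mm_mask_mul_ss and _mm_maskz_mul_ss.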
Added:
    llvm/trunk/test/CodeGen/X86/combine-select.ll
    llvm/trunk/test/CodeGen/X86/fma-scalar-combine.ll
Added: llvm/trunk/test/CodeGen/X86/combine-select.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-select.ll?rev=332872&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-select.ll (added)
+++ llvm/trunk/test/CodeGen/X86/combine-select.ll Mon May 21 12:33:42 2018
@@ -0,0 +1,134 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
+
+define <4 x float> @select_mask_add_ss(<4 x float> %w, i8 zeroext %u, <4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: select_mask_add_ss:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: vaddss %xmm2, %xmm1, %xmm2
+; CHECK-NEXT: testb $1, %dil
+; CHECK-NEXT: sete %al
+; CHECK-NEXT: kmovw %eax, %k1
+; CHECK-NEXT: vmovss %xmm0, %xmm1, %xmm2 {%k1}
+; CHECK-NEXT: vmovaps %xmm2, %xmm0
+; CHECK-NEXT: retq
+entry:
+ %0 = extractelement <4 x float> %b, i32 0
+ %1 = extractelement <4 x float> %a, i32 0
+ %2 = fadd float %1, %0
+ %3 = and i8 %u, 1
+ %4 = icmp eq i8 %3, 0
+ %5 = extractelement <4 x float> %w, i32 0
+ %6 = select i1 %4, float %5, float %2
+ %7 = insertelement <4 x float> %a, float %6, i32 0
+ ret <4 x float> %7
+}
+
+define <4 x float> @select_maskz_add_ss(i8 zeroext %u, <4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: select_maskz_add_ss:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm1
+; CHECK-NEXT: testb $1, %dil
+; CHECK-NEXT: sete %al
+; CHECK-NEXT: kmovw %eax, %k1
+; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: vmovss %xmm2, %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: retq
+entry:
+ %0 = extractelement <4 x float> %b, i32 0
+ %1 = extractelement <4 x float> %a, i32 0
+ %2 = fadd float %1, %0
+ %3 = and i8 %u, 1
+ %4 = icmp eq i8 %3, 0
+ %5 = select i1 %4, float 0.000000e+00, float %2
+ %6 = insertelement <4 x float> %a, float %5, i32 0
+ ret <4 x float> %6
+}
+
+define <4 x float> @select_mask_sub_ss(<4 x float> %w, i8 zeroext %u, <4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: select_mask_sub_ss:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: vsubss %xmm2, %xmm1, %xmm2
+; CHECK-NEXT: testb $1, %dil
+; CHECK-NEXT: sete %al
+; CHECK-NEXT: kmovw %eax, %k1
+; CHECK-NEXT: vmovss %xmm0, %xmm1, %xmm2 {%k1}
+; CHECK-NEXT: vmovaps %xmm2, %xmm0
+; CHECK-NEXT: retq
+entry:
+ %0 = extractelement <4 x float> %b, i32 0
+ %1 = extractelement <4 x float> %a, i32 0
+ %2 = fsub float %1, %0
+ %3 = and i8 %u, 1
+ %4 = icmp eq i8 %3, 0
+ %5 = extractelement <4 x float> %w, i32 0
+ %6 = select i1 %4, float %5, float %2
+ %7 = insertelement <4 x float> %a, float %6, i32 0
+ ret <4 x float> %7
+}
+
+define <4 x float> @select_maskz_sub_ss(i8 zeroext %u, <4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: select_maskz_sub_ss:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: vsubss %xmm1, %xmm0, %xmm1
+; CHECK-NEXT: testb $1, %dil
+; CHECK-NEXT: sete %al
+; CHECK-NEXT: kmovw %eax, %k1
+; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: vmovss %xmm2, %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: retq
+entry:
+ %0 = extractelement <4 x float> %b, i32 0
+ %1 = extractelement <4 x float> %a, i32 0
+ %2 = fsub float %1, %0
+ %3 = and i8 %u, 1
+ %4 = icmp eq i8 %3, 0
+ %5 = select i1 %4, float 0.000000e+00, float %2
+ %6 = insertelement <4 x float> %a, float %5, i32 0
+ ret <4 x float> %6
+}
+
+define <4 x float> @select_mask_mul_ss(<4 x float> %w, i8 zeroext %u, <4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: select_mask_mul_ss:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: vmulss %xmm2, %xmm1, %xmm2
+; CHECK-NEXT: testb $1, %dil
+; CHECK-NEXT: sete %al
+; CHECK-NEXT: kmovw %eax, %k1
+; CHECK-NEXT: vmovss %xmm0, %xmm1, %xmm2 {%k1}
+; CHECK-NEXT: vmovaps %xmm2, %xmm0
+; CHECK-NEXT: retq
+entry:
+ %0 = extractelement <4 x float> %b, i32 0
+ %1 = extractelement <4 x float> %a, i32 0
+ %2 = fmul float %1, %0
+ %3 = and i8 %u, 1
+ %4 = icmp eq i8 %3, 0
+ %5 = extractelement <4 x float> %w, i32 0
+ %6 = select i1 %4, float %5, float %2
+ %7 = insertelement <4 x float> %a, float %6, i32 0
+ ret <4 x float> %7
+}
+
+define <4 x float> @select_maskz_mul_ss(i8 zeroext %u, <4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: select_maskz_mul_ss:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: vmulss %xmm1, %xmm0, %xmm1
+; CHECK-NEXT: testb $1, %dil
+; CHECK-NEXT: sete %al
+; CHECK-NEXT: kmovw %eax, %k1
+; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: vmovss %xmm2, %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: retq
+entry:
+ %0 = extractelement <4 x float> %b, i32 0
+ %1 = extractelement <4 x float> %a, i32 0
+ %2 = fmul float %1, %0
+ %3 = and i8 %u, 1
+ %4 = icmp eq i8 %3, 0
+ %5 = select i1 %4, float 0.000000e+00, float %2
+ %6 = insertelement <4 x float> %a, float %5, i32 0
+ ret <4 x float> %6
+}
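
The fma-scalar-combine.ll tests below exercise the same masked-select idea for
scalar FMA shapes: a fast-math fmul/fadd (or fsub) pair followed by a select on
bit 0 of the mask. This resembles what the AVX-512 masked scalar FMA intrinsics
lower to, though the f32 tests pass <2 x double> and bitcast, so the exact
source likely differed. A hedged C sketch of the three fmadd masking variants
(illustrative names; assumes -mavx512f plus fast-math so the separate mul and
add may fuse into an FMA):

    #include <immintrin.h>

    /* combine_scalar_mask_fmadd_*: lane 0 becomes a[0]*b[0] + c[0] when
       bit 0 of k is set, otherwise a[0] is kept; upper lanes come from a. */
    __m128 mask_fmadd_ss(__m128 a, __mmask8 k, __m128 b, __m128 c) {
      return _mm_mask_fmadd_ss(a, k, b, c);
    }

    /* combine_scalar_maskz_fmadd_*: lane 0 is zeroed when bit 0 of k is
       clear. */
    __m128 maskz_fmadd_ss(__mmask8 k, __m128 a, __m128 b, __m128 c) {
      return _mm_maskz_fmadd_ss(k, a, b, c);
    }

    /* combine_scalar_mask3_fmadd_*: lane 0 falls back to c[0] when bit 0
       of k is clear; upper lanes still come from a. */
    __m128 mask3_fmadd_ss(__m128 a, __m128 b, __m128 c, __mmask8 k) {
      return _mm_mask3_fmadd_ss(a, b, c, k);
    }

The fmsub, fnmadd, and fnmsub tests vary only in the signs applied to the
product and the addend (e.g. _mm_mask_fmsub_ss, _mm_mask3_fnmadd_ss).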
Added: llvm/trunk/test/CodeGen/X86/fma-scalar-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma-scalar-combine.ll?rev=332872&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma-scalar-combine.ll (added)
+++ llvm/trunk/test/CodeGen/X86/fma-scalar-combine.ll Mon May 21 12:33:42 2018
@@ -0,0 +1,568 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f -mattr=+fma -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=SKX
+
+define <2 x double> @combine_scalar_mask_fmadd_f32(<2 x double> %a, i8 zeroext %k, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: combine_scalar_mask_fmadd_f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vfmadd213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa9,0xca]
+; CHECK-NEXT: # xmm1 = (xmm0 * xmm1) + xmm2
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} # encoding: [0x62,0xf1,0x7e,0x09,0x10,0xc1]
+; CHECK-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = bitcast <2 x double> %a to <4 x float>
+ %1 = bitcast <2 x double> %b to <4 x float>
+ %2 = bitcast <2 x double> %c to <4 x float>
+ %3 = extractelement <4 x float> %0, i64 0
+ %4 = extractelement <4 x float> %1, i64 0
+ %5 = extractelement <4 x float> %2, i64 0
+ %6 = fmul fast float %4, %3
+ %7 = fadd fast float %6, %5
+ %8 = bitcast i8 %k to <8 x i1>
+ %9 = extractelement <8 x i1> %8, i64 0
+ %10 = select i1 %9, float %7, float %3
+ %11 = insertelement <4 x float> %0, float %10, i64 0
+ %12 = bitcast <4 x float> %11 to <2 x double>
+ ret <2 x double> %12
+}
+
+define <2 x double> @combine_scalar_mask_fmadd_f64(<2 x double> %a, i8 zeroext %k, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: combine_scalar_mask_fmadd_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vfmadd213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa9,0xca]
+; CHECK-NEXT: # xmm1 = (xmm0 * xmm1) + xmm2
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1} # encoding: [0x62,0xf1,0xff,0x09,0x10,0xc1]
+; CHECK-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = extractelement <2 x double> %a, i64 0
+ %1 = extractelement <2 x double> %b, i64 0
+ %2 = extractelement <2 x double> %c, i64 0
+ %3 = fmul fast double %1, %0
+ %4 = fadd fast double %3, %2
+ %5 = bitcast i8 %k to <8 x i1>
+ %6 = extractelement <8 x i1> %5, i64 0
+ %7 = select i1 %6, double %4, double %0
+ %8 = insertelement <2 x double> %a, double %7, i64 0
+ ret <2 x double> %8
+}
+
+define <2 x double> @combine_scalar_maskz_fmadd_32(i8 zeroext %k, <2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: combine_scalar_maskz_fmadd_32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vfmadd213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa9,0xca]
+; CHECK-NEXT: # xmm1 = (xmm0 * xmm1) + xmm2
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7e,0x89,0x10,0xc1]
+; CHECK-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = bitcast <2 x double> %a to <4 x float>
+ %1 = bitcast <2 x double> %b to <4 x float>
+ %2 = bitcast <2 x double> %c to <4 x float>
+ %3 = extractelement <4 x float> %0, i64 0
+ %4 = extractelement <4 x float> %1, i64 0
+ %5 = extractelement <4 x float> %2, i64 0
+ %6 = fmul fast float %4, %3
+ %7 = fadd fast float %6, %5
+ %8 = bitcast i8 %k to <8 x i1>
+ %9 = extractelement <8 x i1> %8, i64 0
+ %10 = select i1 %9, float %7, float 0.000000e+00
+ %11 = insertelement <4 x float> %0, float %10, i64 0
+ %12 = bitcast <4 x float> %11 to <2 x double>
+ ret <2 x double> %12
+}
+
+define <2 x double> @combine_scalar_maskz_fmadd_64(i8 zeroext %k, <2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: combine_scalar_maskz_fmadd_64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vfmadd213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa9,0xca]
+; CHECK-NEXT: # xmm1 = (xmm0 * xmm1) + xmm2
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0xff,0x89,0x10,0xc1]
+; CHECK-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = extractelement <2 x double> %a, i64 0
+ %1 = extractelement <2 x double> %b, i64 0
+ %2 = extractelement <2 x double> %c, i64 0
+ %3 = fmul fast double %1, %0
+ %4 = fadd fast double %3, %2
+ %5 = bitcast i8 %k to <8 x i1>
+ %6 = extractelement <8 x i1> %5, i64 0
+ %7 = select i1 %6, double %4, double 0.000000e+00
+ %8 = insertelement <2 x double> %a, double %7, i64 0
+ ret <2 x double> %8
+}
+
+define <2 x double> @combine_scalar_mask3_fmadd_32(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 zeroext %k) {
+; CHECK-LABEL: combine_scalar_mask3_fmadd_32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vfmadd213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa9,0xca]
+; CHECK-NEXT: # xmm1 = (xmm0 * xmm1) + xmm2
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovss %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf1,0x7e,0x09,0x10,0xd1]
+; CHECK-NEXT: vmovaps %xmm2, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc2]
+; CHECK-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = bitcast <2 x double> %a to <4 x float>
+ %1 = bitcast <2 x double> %b to <4 x float>
+ %2 = bitcast <2 x double> %c to <4 x float>
+ %3 = extractelement <4 x float> %0, i64 0
+ %4 = extractelement <4 x float> %1, i64 0
+ %5 = extractelement <4 x float> %2, i64 0
+ %6 = fmul fast float %4, %3
+ %7 = fadd fast float %6, %5
+ %8 = bitcast i8 %k to <8 x i1>
+ %9 = extractelement <8 x i1> %8, i64 0
+ %10 = select i1 %9, float %7, float %5
+ %11 = insertelement <4 x float> %0, float %10, i64 0
+ %12 = bitcast <4 x float> %11 to <2 x double>
+ ret <2 x double> %12
+}
+
+define <2 x double> @combine_scalar_mask3_fmadd_64(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 zeroext %k) {
+; CHECK-LABEL: combine_scalar_mask3_fmadd_64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vfmadd213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa9,0xca]
+; CHECK-NEXT: # xmm1 = (xmm0 * xmm1) + xmm2
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovsd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf1,0xff,0x09,0x10,0xd1]
+; CHECK-NEXT: vmovapd %xmm2, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc2]
+; CHECK-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = extractelement <2 x double> %a, i64 0
+ %1 = extractelement <2 x double> %b, i64 0
+ %2 = extractelement <2 x double> %c, i64 0
+ %3 = fmul fast double %1, %0
+ %4 = fadd fast double %3, %2
+ %5 = bitcast i8 %k to <8 x i1>
+ %6 = extractelement <8 x i1> %5, i64 0
+ %7 = select i1 %6, double %4, double %2
+ %8 = insertelement <2 x double> %a, double %7, i64 0
+ ret <2 x double> %8
+}
+
+define <2 x double> @combine_scalar_mask_fmsub_f32(<2 x double> %a, i8 zeroext %k, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: combine_scalar_mask_fmsub_f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vfmsub213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xab,0xca]
+; CHECK-NEXT: # xmm1 = (xmm0 * xmm1) - xmm2
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} # encoding: [0x62,0xf1,0x7e,0x09,0x10,0xc1]
+; CHECK-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = bitcast <2 x double> %a to <4 x float>
+ %1 = bitcast <2 x double> %b to <4 x float>
+ %2 = bitcast <2 x double> %c to <4 x float>
+ %3 = extractelement <4 x float> %0, i64 0
+ %4 = extractelement <4 x float> %1, i64 0
+ %5 = extractelement <4 x float> %2, i64 0
+ %6 = fmul fast float %4, %3
+ %7 = fsub fast float %6, %5
+ %8 = bitcast i8 %k to <8 x i1>
+ %9 = extractelement <8 x i1> %8, i64 0
+ %10 = select i1 %9, float %7, float %3
+ %11 = insertelement <4 x float> %0, float %10, i64 0
+ %12 = bitcast <4 x float> %11 to <2 x double>
+ ret <2 x double> %12
+}
+
+define <2 x double> @combine_scalar_mask_fmsub_f64(<2 x double> %a, i8 zeroext %k, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: combine_scalar_mask_fmsub_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vfmsub213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xab,0xca]
+; CHECK-NEXT: # xmm1 = (xmm0 * xmm1) - xmm2
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1} # encoding: [0x62,0xf1,0xff,0x09,0x10,0xc1]
+; CHECK-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = extractelement <2 x double> %a, i64 0
+ %1 = extractelement <2 x double> %b, i64 0
+ %2 = extractelement <2 x double> %c, i64 0
+ %3 = fmul fast double %1, %0
+ %4 = fsub fast double %3, %2
+ %5 = bitcast i8 %k to <8 x i1>
+ %6 = extractelement <8 x i1> %5, i64 0
+ %7 = select i1 %6, double %4, double %0
+ %8 = insertelement <2 x double> %a, double %7, i64 0
+ ret <2 x double> %8
+}
+
+define <2 x double> @combine_scalar_maskz_fmsub_32(i8 zeroext %k, <2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: combine_scalar_maskz_fmsub_32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vfmsub213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xab,0xca]
+; CHECK-NEXT: # xmm1 = (xmm0 * xmm1) - xmm2
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7e,0x89,0x10,0xc1]
+; CHECK-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = bitcast <2 x double> %a to <4 x float>
+ %1 = bitcast <2 x double> %b to <4 x float>
+ %2 = bitcast <2 x double> %c to <4 x float>
+ %3 = extractelement <4 x float> %0, i64 0
+ %4 = extractelement <4 x float> %1, i64 0
+ %5 = extractelement <4 x float> %2, i64 0
+ %6 = fmul fast float %4, %3
+ %7 = fsub fast float %6, %5
+ %8 = bitcast i8 %k to <8 x i1>
+ %9 = extractelement <8 x i1> %8, i64 0
+ %10 = select i1 %9, float %7, float 0.000000e+00
+ %11 = insertelement <4 x float> %0, float %10, i64 0
+ %12 = bitcast <4 x float> %11 to <2 x double>
+ ret <2 x double> %12
+}
+
+define <2 x double> @combine_scalar_maskz_fmsub_64(i8 zeroext %k, <2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: combine_scalar_maskz_fmsub_64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vfmsub213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xab,0xca]
+; CHECK-NEXT: # xmm1 = (xmm0 * xmm1) - xmm2
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0xff,0x89,0x10,0xc1]
+; CHECK-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = extractelement <2 x double> %a, i64 0
+ %1 = extractelement <2 x double> %b, i64 0
+ %2 = extractelement <2 x double> %c, i64 0
+ %3 = fmul fast double %1, %0
+ %4 = fsub fast double %3, %2
+ %5 = bitcast i8 %k to <8 x i1>
+ %6 = extractelement <8 x i1> %5, i64 0
+ %7 = select i1 %6, double %4, double 0.000000e+00
+ %8 = insertelement <2 x double> %a, double %7, i64 0
+ ret <2 x double> %8
+}
+
+define <2 x double> @combine_scalar_mask3_fmsub_32(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 zeroext %k) {
+; CHECK-LABEL: combine_scalar_mask3_fmsub_32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vfmsub213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xab,0xca]
+; CHECK-NEXT: # xmm1 = (xmm0 * xmm1) - xmm2
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovss %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf1,0x7e,0x09,0x10,0xd1]
+; CHECK-NEXT: vmovaps %xmm2, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc2]
+; CHECK-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = bitcast <2 x double> %a to <4 x float>
+ %1 = bitcast <2 x double> %b to <4 x float>
+ %2 = bitcast <2 x double> %c to <4 x float>
+ %3 = extractelement <4 x float> %0, i64 0
+ %4 = extractelement <4 x float> %1, i64 0
+ %5 = extractelement <4 x float> %2, i64 0
+ %6 = fmul fast float %4, %3
+ %7 = fsub fast float %6, %5
+ %8 = bitcast i8 %k to <8 x i1>
+ %9 = extractelement <8 x i1> %8, i64 0
+ %10 = select i1 %9, float %7, float %5
+ %11 = insertelement <4 x float> %0, float %10, i64 0
+ %12 = bitcast <4 x float> %11 to <2 x double>
+ ret <2 x double> %12
+}
+
+define <2 x double> @combine_scalar_mask3_fmsub_64(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 zeroext %k) {
+; CHECK-LABEL: combine_scalar_mask3_fmsub_64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vfmsub213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xab,0xca]
+; CHECK-NEXT: # xmm1 = (xmm0 * xmm1) - xmm2
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovsd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf1,0xff,0x09,0x10,0xd1]
+; CHECK-NEXT: vmovapd %xmm2, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc2]
+; CHECK-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = extractelement <2 x double> %a, i64 0
+ %1 = extractelement <2 x double> %b, i64 0
+ %2 = extractelement <2 x double> %c, i64 0
+ %3 = fmul fast double %1, %0
+ %4 = fsub fast double %3, %2
+ %5 = bitcast i8 %k to <8 x i1>
+ %6 = extractelement <8 x i1> %5, i64 0
+ %7 = select i1 %6, double %4, double %2
+ %8 = insertelement <2 x double> %a, double %7, i64 0
+ ret <2 x double> %8
+}
+
+define <2 x double> @combine_scalar_mask_fnmadd_f32(<2 x double> %a, i8 zeroext %k, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: combine_scalar_mask_fnmadd_f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vfnmadd213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xad,0xca]
+; CHECK-NEXT: # xmm1 = -(xmm0 * xmm1) + xmm2
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} # encoding: [0x62,0xf1,0x7e,0x09,0x10,0xc1]
+; CHECK-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = bitcast <2 x double> %a to <4 x float>
+ %1 = bitcast <2 x double> %b to <4 x float>
+ %2 = bitcast <2 x double> %c to <4 x float>
+ %3 = extractelement <4 x float> %0, i64 0
+ %4 = extractelement <4 x float> %1, i64 0
+ %5 = extractelement <4 x float> %2, i64 0
+ %6 = fmul fast float %4, %3
+ %7 = fsub fast float %5, %6
+ %8 = bitcast i8 %k to <8 x i1>
+ %9 = extractelement <8 x i1> %8, i64 0
+ %10 = select i1 %9, float %7, float %3
+ %11 = insertelement <4 x float> %0, float %10, i64 0
+ %12 = bitcast <4 x float> %11 to <2 x double>
+ ret <2 x double> %12
+}
+
+define <2 x double> @combine_scalar_mask_fnmadd_f64(<2 x double> %a, i8 zeroext %k, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: combine_scalar_mask_fnmadd_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vfnmadd213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xad,0xca]
+; CHECK-NEXT: # xmm1 = -(xmm0 * xmm1) + xmm2
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1} # encoding: [0x62,0xf1,0xff,0x09,0x10,0xc1]
+; CHECK-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = extractelement <2 x double> %a, i64 0
+ %1 = extractelement <2 x double> %b, i64 0
+ %2 = extractelement <2 x double> %c, i64 0
+ %3 = fmul fast double %1, %0
+ %4 = fsub fast double %2, %3
+ %5 = bitcast i8 %k to <8 x i1>
+ %6 = extractelement <8 x i1> %5, i64 0
+ %7 = select i1 %6, double %4, double %0
+ %8 = insertelement <2 x double> %a, double %7, i64 0
+ ret <2 x double> %8
+}
+
+define <2 x double> @combine_scalar_maskz_fnmadd_32(i8 zeroext %k, <2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: combine_scalar_maskz_fnmadd_32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vfnmadd213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xad,0xca]
+; CHECK-NEXT: # xmm1 = -(xmm0 * xmm1) + xmm2
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7e,0x89,0x10,0xc1]
+; CHECK-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = bitcast <2 x double> %a to <4 x float>
+ %1 = bitcast <2 x double> %b to <4 x float>
+ %2 = bitcast <2 x double> %c to <4 x float>
+ %3 = extractelement <4 x float> %0, i64 0
+ %4 = extractelement <4 x float> %1, i64 0
+ %5 = extractelement <4 x float> %2, i64 0
+ %6 = fmul fast float %4, %3
+ %7 = fsub fast float %5, %6
+ %8 = bitcast i8 %k to <8 x i1>
+ %9 = extractelement <8 x i1> %8, i64 0
+ %10 = select i1 %9, float %7, float 0.000000e+00
+ %11 = insertelement <4 x float> %0, float %10, i64 0
+ %12 = bitcast <4 x float> %11 to <2 x double>
+ ret <2 x double> %12
+}
+
+define <2 x double> @combine_scalar_maskz_fnmadd_64(i8 zeroext %k, <2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: combine_scalar_maskz_fnmadd_64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vfnmadd213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xad,0xca]
+; CHECK-NEXT: # xmm1 = -(xmm0 * xmm1) + xmm2
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0xff,0x89,0x10,0xc1]
+; CHECK-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = extractelement <2 x double> %a, i64 0
+ %1 = extractelement <2 x double> %b, i64 0
+ %2 = extractelement <2 x double> %c, i64 0
+ %3 = fmul fast double %1, %0
+ %4 = fsub fast double %2, %3
+ %5 = bitcast i8 %k to <8 x i1>
+ %6 = extractelement <8 x i1> %5, i64 0
+ %7 = select i1 %6, double %4, double 0.000000e+00
+ %8 = insertelement <2 x double> %a, double %7, i64 0
+ ret <2 x double> %8
+}
+
+define <2 x double> @combine_scalar_mask3_fnmadd_32(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 zeroext %k) {
+; CHECK-LABEL: combine_scalar_mask3_fnmadd_32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vfnmadd213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xad,0xca]
+; CHECK-NEXT: # xmm1 = -(xmm0 * xmm1) + xmm2
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovss %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf1,0x7e,0x09,0x10,0xd1]
+; CHECK-NEXT: vmovaps %xmm2, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc2]
+; CHECK-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = bitcast <2 x double> %a to <4 x float>
+ %1 = bitcast <2 x double> %b to <4 x float>
+ %2 = bitcast <2 x double> %c to <4 x float>
+ %3 = extractelement <4 x float> %0, i64 0
+ %4 = extractelement <4 x float> %1, i64 0
+ %5 = extractelement <4 x float> %2, i64 0
+ %6 = fmul fast float %4, %3
+ %7 = fsub fast float %5, %6
+ %8 = bitcast i8 %k to <8 x i1>
+ %9 = extractelement <8 x i1> %8, i64 0
+ %10 = select i1 %9, float %7, float %5
+ %11 = insertelement <4 x float> %0, float %10, i64 0
+ %12 = bitcast <4 x float> %11 to <2 x double>
+ ret <2 x double> %12
+}
+
+define <2 x double> @combine_scalar_mask3_fnmadd_64(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 zeroext %k) {
+; CHECK-LABEL: combine_scalar_mask3_fnmadd_64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vfnmadd213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xad,0xca]
+; CHECK-NEXT: # xmm1 = -(xmm0 * xmm1) + xmm2
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovsd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf1,0xff,0x09,0x10,0xd1]
+; CHECK-NEXT: vmovapd %xmm2, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc2]
+; CHECK-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = extractelement <2 x double> %a, i64 0
+ %1 = extractelement <2 x double> %b, i64 0
+ %2 = extractelement <2 x double> %c, i64 0
+ %3 = fmul fast double %1, %0
+ %4 = fsub fast double %2, %3
+ %5 = bitcast i8 %k to <8 x i1>
+ %6 = extractelement <8 x i1> %5, i64 0
+ %7 = select i1 %6, double %4, double %2
+ %8 = insertelement <2 x double> %a, double %7, i64 0
+ ret <2 x double> %8
+}
+
+define <2 x double> @combine_scalar_mask_fnmsub_f32(<2 x double> %a, i8 zeroext %k, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: combine_scalar_mask_fnmsub_f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vfnmsub213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xaf,0xca]
+; CHECK-NEXT: # xmm1 = -(xmm0 * xmm1) - xmm2
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} # encoding: [0x62,0xf1,0x7e,0x09,0x10,0xc1]
+; CHECK-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = bitcast <2 x double> %a to <4 x float>
+ %1 = bitcast <2 x double> %b to <4 x float>
+ %2 = bitcast <2 x double> %c to <4 x float>
+ %3 = extractelement <4 x float> %0, i64 0
+ %4 = extractelement <4 x float> %1, i64 0
+ %5 = extractelement <4 x float> %2, i64 0
+ %sub = fsub fast float -0.000000e+00, %5
+ %6 = fmul fast float %4, %3
+ %7 = fsub fast float %sub, %6
+ %8 = bitcast i8 %k to <8 x i1>
+ %9 = extractelement <8 x i1> %8, i64 0
+ %10 = select i1 %9, float %7, float %3
+ %11 = insertelement <4 x float> %0, float %10, i64 0
+ %12 = bitcast <4 x float> %11 to <2 x double>
+ ret <2 x double> %12
+}
+
+define <2 x double> @combine_scalar_mask_fnmsub_f64(<2 x double> %a, i8 zeroext %k, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: combine_scalar_mask_fnmsub_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vfnmsub213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xaf,0xca]
+; CHECK-NEXT: # xmm1 = -(xmm0 * xmm1) - xmm2
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1} # encoding: [0x62,0xf1,0xff,0x09,0x10,0xc1]
+; CHECK-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = extractelement <2 x double> %a, i64 0
+ %1 = extractelement <2 x double> %b, i64 0
+ %2 = extractelement <2 x double> %c, i64 0
+ %sub = fsub fast double -0.000000e+00, %2
+ %3 = fmul fast double %1, %0
+ %4 = fsub fast double %sub, %3
+ %5 = bitcast i8 %k to <8 x i1>
+ %6 = extractelement <8 x i1> %5, i64 0
+ %7 = select i1 %6, double %4, double %0
+ %8 = insertelement <2 x double> %a, double %7, i64 0
+ ret <2 x double> %8
+}
+
+define <2 x double> @combine_scalar_maskz_fnmsub_32(i8 zeroext %k, <2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: combine_scalar_maskz_fnmsub_32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vfnmsub213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xaf,0xca]
+; CHECK-NEXT: # xmm1 = -(xmm0 * xmm1) - xmm2
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7e,0x89,0x10,0xc1]
+; CHECK-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = bitcast <2 x double> %a to <4 x float>
+ %1 = bitcast <2 x double> %b to <4 x float>
+ %2 = bitcast <2 x double> %c to <4 x float>
+ %3 = extractelement <4 x float> %0, i64 0
+ %4 = extractelement <4 x float> %1, i64 0
+ %5 = extractelement <4 x float> %2, i64 0
+ %sub = fsub fast float -0.000000e+00, %5
+ %6 = fmul fast float %4, %3
+ %7 = fsub fast float %sub, %6
+ %8 = bitcast i8 %k to <8 x i1>
+ %9 = extractelement <8 x i1> %8, i64 0
+ %10 = select i1 %9, float %7, float 0.000000e+00
+ %11 = insertelement <4 x float> %0, float %10, i64 0
+ %12 = bitcast <4 x float> %11 to <2 x double>
+ ret <2 x double> %12
+}
+
+define <2 x double> @combine_scalar_maskz_fnmsub_64(i8 zeroext %k, <2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: combine_scalar_maskz_fnmsub_64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vfnmsub213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xaf,0xca]
+; CHECK-NEXT: # xmm1 = -(xmm0 * xmm1) - xmm2
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0xff,0x89,0x10,0xc1]
+; CHECK-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = extractelement <2 x double> %a, i64 0
+ %1 = extractelement <2 x double> %b, i64 0
+ %2 = extractelement <2 x double> %c, i64 0
+ %sub = fsub fast double -0.000000e+00, %2
+ %3 = fmul fast double %1, %0
+ %4 = fsub fast double %sub, %3
+ %5 = bitcast i8 %k to <8 x i1>
+ %6 = extractelement <8 x i1> %5, i64 0
+ %7 = select i1 %6, double %4, double 0.000000e+00
+ %8 = insertelement <2 x double> %a, double %7, i64 0
+ ret <2 x double> %8
+}
+
+define <2 x double> @combine_scalar_mask3_fnmsub_32(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 zeroext %k) {
+; CHECK-LABEL: combine_scalar_mask3_fnmsub_32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vfnmsub213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xaf,0xca]
+; CHECK-NEXT: # xmm1 = -(xmm0 * xmm1) - xmm2
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovss %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf1,0x7e,0x09,0x10,0xd1]
+; CHECK-NEXT: vmovaps %xmm2, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc2]
+; CHECK-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = bitcast <2 x double> %a to <4 x float>
+ %1 = bitcast <2 x double> %b to <4 x float>
+ %2 = bitcast <2 x double> %c to <4 x float>
+ %3 = extractelement <4 x float> %0, i64 0
+ %4 = extractelement <4 x float> %1, i64 0
+ %5 = extractelement <4 x float> %2, i64 0
+ %sub = fsub fast float -0.000000e+00, %5
+ %6 = fmul fast float %4, %3
+ %7 = fsub fast float %sub, %6
+ %8 = bitcast i8 %k to <8 x i1>
+ %9 = extractelement <8 x i1> %8, i64 0
+ %10 = select i1 %9, float %7, float %5
+ %11 = insertelement <4 x float> %0, float %10, i64 0
+ %12 = bitcast <4 x float> %11 to <2 x double>
+ ret <2 x double> %12
+}
+
+define <2 x double> @combine_scalar_mask3_fnmsub_64(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 zeroext %k) {
+; CHECK-LABEL: combine_scalar_mask3_fnmsub_64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vfnmsub213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xaf,0xca]
+; CHECK-NEXT: # xmm1 = -(xmm0 * xmm1) - xmm2
+; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vmovsd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf1,0xff,0x09,0x10,0xd1]
+; CHECK-NEXT: vmovapd %xmm2, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc2]
+; CHECK-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = extractelement <2 x double> %a, i64 0
+ %1 = extractelement <2 x double> %b, i64 0
+ %2 = extractelement <2 x double> %c, i64 0
+ %sub = fsub fast double -0.000000e+00, %2
+ %3 = fmul fast double %1, %0
+ %4 = fsub fast double %sub, %3
+ %5 = bitcast i8 %k to <8 x i1>
+ %6 = extractelement <8 x i1> %5, i64 0
+ %7 = select i1 %6, double %4, double %2
+ %8 = insertelement <2 x double> %a, double %7, i64 0
+ ret <2 x double> %8
+}