[llvm] r277948 - [AVX-512] Add andnps/andnpd to the avx512vl stack folding test.

Craig Topper via llvm-commits <llvm-commits at lists.llvm.org>
Sat Aug 6 22:39:48 PDT 2016


Author: ctopper
Date: Sun Aug  7 00:39:48 2016
New Revision: 277948

URL: http://llvm.org/viewvc/llvm-project?rev=277948&view=rev
Log:
[AVX-512] Add andnps/andnpd to the avx512vl stack folding test.

Modified:
    llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx512vl.ll
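For context, every test in this file follows the same stack-folding pattern: a dummy inline asm "nop" clobbers xmm2-xmm15 and the flags, leaving too few free registers for the live vector values, so the register allocator has to spill one operand to the stack. The CHECK line then verifies that the reload is folded straight into the memory operand of the instruction under test (the "Folded Reload" comment) instead of being emitted as a separate load. A minimal sketch of the shape, using a hypothetical function name modeled on the existing add/and tests in this file:

    define <2 x double> @sketch_fold_addpd(<2 x double> %a0, <2 x double> %a1) {
      ;CHECK-LABEL: sketch_fold_addpd
      ;CHECK:       vaddpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
      ; The asm returns a value in an xmm register and clobbers xmm2-xmm15,
      ; so %a0, %a1, and the result cannot all stay in registers across it;
      ; one operand is spilled and later reloaded.
      %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
      %2 = fadd <2 x double> %a0, %a1
      ret <2 x double> %2
    }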

Modified: llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx512vl.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx512vl.ll?rev=277948&r1=277947&r2=277948&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx512vl.ll (original)
+++ llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx512vl.ll Sun Aug  7 00:39:48 2016
@@ -78,6 +78,62 @@ define <4 x float> @stack_fold_addss_int
   ret <4 x float> %5
 }
 
+define <2 x double> @stack_fold_andnpd(<2 x double> %a0, <2 x double> %a1) {
+  ;CHECK-LABEL: stack_fold_andnpd
+  ;CHECK:       vpandnq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = bitcast <2 x double> %a0 to <2 x i64>
+  %3 = bitcast <2 x double> %a1 to <2 x i64>
+  %4 = xor <2 x i64> %2, <i64 -1, i64 -1>
+  %5 = and <2 x i64> %4, %3
+  %6 = bitcast <2 x i64> %5 to <2 x double>
+  ; fadd forces execution domain
+  %7 = fadd <2 x double> %6, <double 0x0, double 0x0>
+  ret <2 x double> %7
+}
+
+define <4 x double> @stack_fold_andnpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+  ;CHECK-LABEL: stack_fold_andnpd_ymm
+  ;CHECK:       vpandnq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = bitcast <4 x double> %a0 to <4 x i64>
+  %3 = bitcast <4 x double> %a1 to <4 x i64>
+  %4 = xor <4 x i64> %2, <i64 -1, i64 -1, i64 -1, i64 -1>
+  %5 = and <4 x i64> %4, %3
+  %6 = bitcast <4 x i64> %5 to <4 x double>
+  ; fadd forces execution domain
+  %7 = fadd <4 x double> %6, <double 0x0, double 0x0, double 0x0, double 0x0>
+  ret <4 x double> %7
+}
+
+define <4 x float> @stack_fold_andnps(<4 x float> %a0, <4 x float> %a1) {
+  ;CHECK-LABEL: stack_fold_andnps
+  ;CHECK:       vpandnq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = bitcast <4 x float> %a0 to <2 x i64>
+  %3 = bitcast <4 x float> %a1 to <2 x i64>
+  %4 = xor <2 x i64> %2, <i64 -1, i64 -1>
+  %5 = and <2 x i64> %4, %3
+  %6 = bitcast <2 x i64> %5 to <4 x float>
+  ; fadd forces execution domain
+  %7 = fadd <4 x float> %6, <float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <4 x float> %7
+}
+
+define <8 x float> @stack_fold_andnps_ymm(<8 x float> %a0, <8 x float> %a1) {
+  ;CHECK-LABEL: stack_fold_andnps_ymm
+  ;CHECK:       vpandnq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = bitcast <8 x float> %a0 to <4 x i64>
+  %3 = bitcast <8 x float> %a1 to <4 x i64>
+  %4 = xor <4 x i64> %2, <i64 -1, i64 -1, i64 -1, i64 -1>
+  %5 = and <4 x i64> %4, %3
+  %6 = bitcast <4 x i64> %5 to <8 x float>
+  ; fadd forces execution domain
+  %7 = fadd <8 x float> %6, <float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <8 x float> %7
+}
+
 define <2 x double> @stack_fold_andpd(<2 x double> %a0, <2 x double> %a1) {
   ;CHECK-LABEL: stack_fold_andpd
   ;CHECK:       vpandq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
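A note on the expected mnemonics: the CHECK lines above look for the integer-domain vpandnq/vpandq rather than vandnpd/vandnps. The EVEX-encoded forms of the FP logic instructions require AVX512DQ, which this test does not enable, so with only AVX512F+AVX512VL the logic op is selected as its integer form; the trailing fadd (the "; fadd forces execution domain" comment) puts the result in the floating-point domain, so the domain-fixing pass would rewrite the logic op into its FP form if one were legally available. Tests in this family are driven by a RUN line of roughly this shape (illustrative; the file's actual RUN line sits at the top of the file, outside this hunk):

    ; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl < %s | FileCheck %s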



