[llvm] r277320 - [AVX512] Replace scalar fp arithmetic intrinsics with native IR in an AVX512 test. The intrinsics aren't lowered to AVX512 instructions.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Sun Jul 31 21:29:16 PDT 2016
Author: ctopper
Date: Sun Jul 31 23:29:16 2016
New Revision: 277320
URL: http://llvm.org/viewvc/llvm-project?rev=277320&view=rev
Log:
[AVX512] Replace scalar fp arithmetic intrinsics with native IR in an AVX512 test. The intrinsics aren't lowered to AVX512 instructions.
The intrinsics really should be removed and autoupgraded.
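For illustration, an upgrade along those lines would rewrite each intrinsic call into the same extract/op/insert sequence this patch now writes by hand in the test. A minimal sketch in the style of lib/IR/AutoUpgrade.cpp follows; the helper name and exact structure are hypothetical, not actual upstream code:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Hypothetical helper: rewrite a scalar SSE arithmetic intrinsic
    // (e.g. llvm.x86.sse2.add.sd) into native IR.
    static Value *upgradeX86ScalarFPArith(CallInst *CI, IRBuilder<> &Builder,
                                          Instruction::BinaryOps Opc) {
      Value *Op0 = CI->getArgOperand(0);
      Value *Op1 = CI->getArgOperand(1);
      // The scalar instructions operate on element 0 only...
      Value *Elt0 = Builder.CreateExtractElement(Op0, (uint64_t)0);
      Value *Elt1 = Builder.CreateExtractElement(Op1, (uint64_t)0);
      Value *Res = Builder.CreateBinOp(Opc, Elt0, Elt1);
      // ...and pass the upper elements of the first operand through
      // unchanged, matching the hardware semantics of addsd/addss etc.
      return Builder.CreateInsertElement(Op0, Res, (uint64_t)0);
    }

A caller would pick Instruction::FAdd, FSub, FMul, or FDiv based on the intrinsic name; once that is in place the declarations deleted below could be removed from the backend entirely.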
Modified:
llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx512vl.ll
Modified: llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx512vl.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx512vl.ll?rev=277320&r1=277319&r2=277320&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx512vl.ll (original)
+++ llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx512vl.ll Sun Jul 31 23:29:16 2016
@@ -52,10 +52,12 @@ define <2 x double> @stack_fold_addsd_in
;CHECK-LABEL: stack_fold_addsd_int
;CHECK: vaddsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <2 x double> @llvm.x86.sse2.add.sd(<2 x double> %a0, <2 x double> %a1)
- ret <2 x double> %2
+ %2 = extractelement <2 x double> %a0, i32 0
+ %3 = extractelement <2 x double> %a1, i32 0
+ %4 = fadd double %2, %3
+ %5 = insertelement <2 x double> %a0, double %4, i32 0
+ ret <2 x double> %5
}
-declare <2 x double> @llvm.x86.sse2.add.sd(<2 x double>, <2 x double>) nounwind readnone
define float @stack_fold_addss(float %a0, float %a1) {
;CHECK-LABEL: stack_fold_addss
@@ -69,10 +71,12 @@ define <4 x float> @stack_fold_addss_int
;CHECK-LABEL: stack_fold_addss_int
;CHECK: vaddss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <4 x float> @llvm.x86.sse.add.ss(<4 x float> %a0, <4 x float> %a1)
- ret <4 x float> %2
+ %2 = extractelement <4 x float> %a0, i32 0
+ %3 = extractelement <4 x float> %a1, i32 0
+ %4 = fadd float %2, %3
+ %5 = insertelement <4 x float> %a0, float %4, i32 0
+ ret <4 x float> %5
}
-declare <4 x float> @llvm.x86.sse.add.ss(<4 x float>, <4 x float>) nounwind readnone
define <2 x double> @stack_fold_andpd(<2 x double> %a0, <2 x double> %a1) {
;CHECK-LABEL: stack_fold_andpd
@@ -130,10 +134,12 @@ define <2 x double> @stack_fold_divsd_in
;CHECK-LABEL: stack_fold_divsd_int
;CHECK: vdivsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <2 x double> @llvm.x86.sse2.div.sd(<2 x double> %a0, <2 x double> %a1)
- ret <2 x double> %2
+ %2 = extractelement <2 x double> %a0, i32 0
+ %3 = extractelement <2 x double> %a1, i32 0
+ %4 = fdiv double %2, %3
+ %5 = insertelement <2 x double> %a0, double %4, i32 0
+ ret <2 x double> %5
}
-declare <2 x double> @llvm.x86.sse2.div.sd(<2 x double>, <2 x double>) nounwind readnone
define float @stack_fold_divss(float %a0, float %a1) {
;CHECK-LABEL: stack_fold_divss
@@ -147,10 +153,12 @@ define <4 x float> @stack_fold_divss_int
;CHECK-LABEL: stack_fold_divss_int
;CHECK: vdivss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <4 x float> @llvm.x86.sse.div.ss(<4 x float> %a0, <4 x float> %a1)
- ret <4 x float> %2
+ %2 = extractelement <4 x float> %a0, i32 0
+ %3 = extractelement <4 x float> %a1, i32 0
+ %4 = fdiv float %2, %3
+ %5 = insertelement <4 x float> %a0, float %4, i32 0
+ ret <4 x float> %5
}
-declare <4 x float> @llvm.x86.sse.div.ss(<4 x float>, <4 x float>) nounwind readnone
define <4 x float> @stack_fold_insertps(<4 x float> %a0, <4 x float> %a1) {
;CHECK-LABEL: stack_fold_insertps
@@ -174,10 +182,12 @@ define <2 x double> @stack_fold_mulsd_in
;CHECK-LABEL: stack_fold_mulsd_int
;CHECK: vmulsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <2 x double> @llvm.x86.sse2.mul.sd(<2 x double> %a0, <2 x double> %a1)
- ret <2 x double> %2
+ %2 = extractelement <2 x double> %a0, i32 0
+ %3 = extractelement <2 x double> %a1, i32 0
+ %4 = fmul double %2, %3
+ %5 = insertelement <2 x double> %a0, double %4, i32 0
+ ret <2 x double> %5
}
-declare <2 x double> @llvm.x86.sse2.mul.sd(<2 x double>, <2 x double>) nounwind readnone
define float @stack_fold_mulss(float %a0, float %a1) {
;CHECK-LABEL: stack_fold_mulss
@@ -191,10 +201,12 @@ define <4 x float> @stack_fold_mulss_int
;CHECK-LABEL: stack_fold_mulss_int
;CHECK: vmulss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <4 x float> @llvm.x86.sse.mul.ss(<4 x float> %a0, <4 x float> %a1)
- ret <4 x float> %2
+ %2 = extractelement <4 x float> %a0, i32 0
+ %3 = extractelement <4 x float> %a1, i32 0
+ %4 = fmul float %2, %3
+ %5 = insertelement <4 x float> %a0, float %4, i32 0
+ ret <4 x float> %5
}
-declare <4 x float> @llvm.x86.sse.mul.ss(<4 x float>, <4 x float>) nounwind readnone
define <2 x double> @stack_fold_orpd(<2 x double> %a0, <2 x double> %a1) {
;CHECK-LABEL: stack_fold_orpd
@@ -292,10 +304,12 @@ define <2 x double> @stack_fold_subsd_in
;CHECK-LABEL: stack_fold_subsd_int
;CHECK: vsubsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <2 x double> @llvm.x86.sse2.sub.sd(<2 x double> %a0, <2 x double> %a1)
- ret <2 x double> %2
+ %2 = extractelement <2 x double> %a0, i32 0
+ %3 = extractelement <2 x double> %a1, i32 0
+ %4 = fsub double %2, %3
+ %5 = insertelement <2 x double> %a0, double %4, i32 0
+ ret <2 x double> %5
}
-declare <2 x double> @llvm.x86.sse2.sub.sd(<2 x double>, <2 x double>) nounwind readnone
define float @stack_fold_subss(float %a0, float %a1) {
;CHECK-LABEL: stack_fold_subss
@@ -309,10 +323,12 @@ define <4 x float> @stack_fold_subss_int
;CHECK-LABEL: stack_fold_subss_int
;CHECK: vsubss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <4 x float> @llvm.x86.sse.sub.ss(<4 x float> %a0, <4 x float> %a1)
- ret <4 x float> %2
+ %2 = extractelement <4 x float> %a0, i32 0
+ %3 = extractelement <4 x float> %a1, i32 0
+ %4 = fsub float %2, %3
+ %5 = insertelement <4 x float> %a0, float %4, i32 0
+ ret <4 x float> %5
}
-declare <4 x float> @llvm.x86.sse.sub.ss(<4 x float>, <4 x float>) nounwind readnone
define <2 x double> @stack_fold_xorpd(<2 x double> %a0, <2 x double> %a1) {
;CHECK-LABEL: stack_fold_xorpd
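Worth noting: to the best of my knowledge (the commit itself doesn't say this), the extract/fadd/insert form used above is also what clang emits for the corresponding C intrinsics in <emmintrin.h>, so the test now checks the IR that real front-end code produces rather than a legacy backend intrinsic. For example:

    #include <emmintrin.h>

    // _mm_add_sd adds element 0 of the two vectors and passes the upper
    // element of the first operand through -- the same semantics as the
    // new native-IR sequence in stack_fold_addsd_int above.
    __m128d add_sd(__m128d a, __m128d b) {
      return _mm_add_sd(a, b);
    }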