[llvm] r250817 - [x86] Fix AVX maskload/store intrinsic prototypes.
Eric Christopher via llvm-commits
llvm-commits at lists.llvm.org
Tue Oct 20 17:27:54 PDT 2015
Interesting, were these getting called from the front end correctly?
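
(For context, a minimal C sketch of how these would be exercised from
source; illustrative only, assuming the standard immintrin.h prototypes
and compiling with -mavx. At the source level the mask operand is
already an integer vector, and only elements whose mask lane has its
sign bit set are loaded/stored.)

  #include <immintrin.h>
  #include <stdio.h>

  int main(void) {
    double src[2] = {1.0, 2.0};
    double dst[2] = {0.0, 0.0};

    /* Lane 0 enabled (sign bit set), lane 1 disabled. */
    __m128i mask = _mm_set_epi64x(0, -1);

    __m128d v = _mm_maskload_pd(src, mask);  /* loads {1.0, 0.0} */
    _mm_maskstore_pd(dst, mask, v);          /* writes dst[0] only */

    printf("%f %f\n", dst[0], dst[1]);       /* 1.000000 0.000000 */
    return 0;
  }
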
-eric
On Tue, Oct 20, 2015 at 4:22 AM Andrea Di Biagio via llvm-commits <
llvm-commits at lists.llvm.org> wrote:
> Author: adibiagio
> Date: Tue Oct 20 06:20:13 2015
> New Revision: 250817
>
> URL: http://llvm.org/viewvc/llvm-project?rev=250817&view=rev
> Log:
> [x86] Fix AVX maskload/store intrinsic prototypes.
>
> The mask value type for maskload/maskstore GCC builtins is never a
> vector of packed floats/doubles.
>
> This patch fixes the following issues:
> 1. The mask argument for builtin_ia32_maskloadpd and
>    builtin_ia32_maskstorepd should be of type llvm_v2i64_ty and not
>    llvm_v2f64_ty.
> 2. The mask argument for builtin_ia32_maskloadpd256 and
>    builtin_ia32_maskstorepd256 should be of type llvm_v4i64_ty and not
>    llvm_v4f64_ty.
> 3. The mask argument for builtin_ia32_maskloadps and
>    builtin_ia32_maskstoreps should be of type llvm_v4i32_ty and not
>    llvm_v4f32_ty.
> 4. The mask argument for builtin_ia32_maskloadps256 and
>    builtin_ia32_maskstoreps256 should be of type llvm_v8i32_ty and not
>    llvm_v8f32_ty.
>
> Differential Revision: http://reviews.llvm.org/D13776
>
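> For reference, the user-level AVX intrinsics in immintrin.h already
> model the mask as an integer vector, which is what the i-vector
> operand types above correspond to. A minimal sketch, assuming the
> standard immintrin.h prototypes (compile with -mavx):
>
>   #include <immintrin.h>
>
>   /* The mask is __m256i (<4 x i64>); only lanes whose mask element
>      has its sign bit set are loaded, the remaining lanes are zeroed. */
>   static __m256d load_low_half(const double *p) {
>     __m256i mask = _mm256_set_epi64x(0, 0, -1, -1); /* lanes 0 and 1 */
>     return _mm256_maskload_pd(p, mask);
>   }
>
>   /* The store form takes the same __m256i mask; only the selected
>      lanes are written to memory. */
>   static void store_low_half(double *p, __m256d v) {
>     __m256i mask = _mm256_set_epi64x(0, 0, -1, -1);
>     _mm256_maskstore_pd(p, mask, v);
>   }
>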
> Modified:
> llvm/trunk/include/llvm/IR/IntrinsicsX86.td
> llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll
> llvm/trunk/test/CodeGen/X86/avx-load-store.ll
> llvm/trunk/test/CodeGen/X86/avx-win64.ll
>
> Modified: llvm/trunk/include/llvm/IR/IntrinsicsX86.td
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IR/IntrinsicsX86.td?rev=250817&r1=250816&r2=250817&view=diff
>
> ==============================================================================
> --- llvm/trunk/include/llvm/IR/IntrinsicsX86.td (original)
> +++ llvm/trunk/include/llvm/IR/IntrinsicsX86.td Tue Oct 20 06:20:13 2015
> @@ -1760,16 +1760,16 @@ let TargetPrefix = "x86" in { // All in
> // Conditional load ops
> let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
> def int_x86_avx_maskload_pd : GCCBuiltin<"__builtin_ia32_maskloadpd">,
> - Intrinsic<[llvm_v2f64_ty], [llvm_ptr_ty, llvm_v2f64_ty],
> + Intrinsic<[llvm_v2f64_ty], [llvm_ptr_ty, llvm_v2i64_ty],
> [IntrReadArgMem]>;
> def int_x86_avx_maskload_ps : GCCBuiltin<"__builtin_ia32_maskloadps">,
> - Intrinsic<[llvm_v4f32_ty], [llvm_ptr_ty, llvm_v4f32_ty],
> + Intrinsic<[llvm_v4f32_ty], [llvm_ptr_ty, llvm_v4i32_ty],
> [IntrReadArgMem]>;
> def int_x86_avx_maskload_pd_256 :
> GCCBuiltin<"__builtin_ia32_maskloadpd256">,
> - Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty, llvm_v4f64_ty],
> + Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty, llvm_v4i64_ty],
> [IntrReadArgMem]>;
> def int_x86_avx_maskload_ps_256 :
> GCCBuiltin<"__builtin_ia32_maskloadps256">,
> - Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty, llvm_v8f32_ty],
> + Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty, llvm_v8i32_ty],
> [IntrReadArgMem]>;
> def int_x86_avx512_mask_loadu_ps_512 :
> GCCBuiltin<"__builtin_ia32_loadups512_mask">,
> Intrinsic<[llvm_v16f32_ty], [llvm_ptr_ty, llvm_v16f32_ty,
> llvm_i16_ty],
> @@ -1789,18 +1789,18 @@ let TargetPrefix = "x86" in { // All in
> let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
> def int_x86_avx_maskstore_pd : GCCBuiltin<"__builtin_ia32_maskstorepd">,
> Intrinsic<[], [llvm_ptr_ty,
> - llvm_v2f64_ty, llvm_v2f64_ty], [IntrReadWriteArgMem]>;
> + llvm_v2i64_ty, llvm_v2f64_ty], [IntrReadWriteArgMem]>;
> def int_x86_avx_maskstore_ps : GCCBuiltin<"__builtin_ia32_maskstoreps">,
> Intrinsic<[], [llvm_ptr_ty,
> - llvm_v4f32_ty, llvm_v4f32_ty], [IntrReadWriteArgMem]>;
> + llvm_v4i32_ty, llvm_v4f32_ty], [IntrReadWriteArgMem]>;
> def int_x86_avx_maskstore_pd_256 :
> GCCBuiltin<"__builtin_ia32_maskstorepd256">,
> Intrinsic<[], [llvm_ptr_ty,
> - llvm_v4f64_ty, llvm_v4f64_ty], [IntrReadWriteArgMem]>;
> + llvm_v4i64_ty, llvm_v4f64_ty], [IntrReadWriteArgMem]>;
> def int_x86_avx_maskstore_ps_256 :
> GCCBuiltin<"__builtin_ia32_maskstoreps256">,
> Intrinsic<[], [llvm_ptr_ty,
> - llvm_v8f32_ty, llvm_v8f32_ty], [IntrReadWriteArgMem]>;
> + llvm_v8i32_ty, llvm_v8f32_ty], [IntrReadWriteArgMem]>;
> def int_x86_avx512_mask_storeu_ps_512 :
> GCCBuiltin<"__builtin_ia32_storeups512_mask">,
> Intrinsic<[], [llvm_ptr_ty, llvm_v16f32_ty, llvm_i16_ty],
>
> Modified: llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll?rev=250817&r1=250816&r2=250817&view=diff
>
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll (original)
> +++ llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll Tue Oct 20 06:20:13
> 2015
> @@ -2536,102 +2536,102 @@ define <32 x i8> @test_x86_avx_ldu_dq_25
> declare <32 x i8> @llvm.x86.avx.ldu.dq.256(i8*) nounwind readonly
>
>
> -define <2 x double> @test_x86_avx_maskload_pd(i8* %a0, <2 x double> %a1) {
> +define <2 x double> @test_x86_avx_maskload_pd(i8* %a0, <2 x i64> %mask) {
> ; CHECK-LABEL: test_x86_avx_maskload_pd:
> ; CHECK: ## BB#0:
> ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
> ; CHECK-NEXT: vmaskmovpd (%eax), %xmm0, %xmm0
> ; CHECK-NEXT: retl
> - %res = call <2 x double> @llvm.x86.avx.maskload.pd(i8* %a0, <2 x
> double> %a1) ; <<2 x double>> [#uses=1]
> + %res = call <2 x double> @llvm.x86.avx.maskload.pd(i8* %a0, <2 x i64>
> %mask) ; <<2 x double>> [#uses=1]
> ret <2 x double> %res
> }
> -declare <2 x double> @llvm.x86.avx.maskload.pd(i8*, <2 x double>)
> nounwind readonly
> +declare <2 x double> @llvm.x86.avx.maskload.pd(i8*, <2 x i64>) nounwind
> readonly
>
>
> -define <4 x double> @test_x86_avx_maskload_pd_256(i8* %a0, <4 x double>
> %a1) {
> +define <4 x double> @test_x86_avx_maskload_pd_256(i8* %a0, <4 x i64>
> %mask) {
> ; CHECK-LABEL: test_x86_avx_maskload_pd_256:
> ; CHECK: ## BB#0:
> ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
> ; CHECK-NEXT: vmaskmovpd (%eax), %ymm0, %ymm0
> ; CHECK-NEXT: retl
> - %res = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %a0, <4 x
> double> %a1) ; <<4 x double>> [#uses=1]
> + %res = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %a0, <4 x
> i64> %mask) ; <<4 x double>> [#uses=1]
> ret <4 x double> %res
> }
> -declare <4 x double> @llvm.x86.avx.maskload.pd.256(i8*, <4 x double>)
> nounwind readonly
> +declare <4 x double> @llvm.x86.avx.maskload.pd.256(i8*, <4 x i64>)
> nounwind readonly
>
>
> -define <4 x float> @test_x86_avx_maskload_ps(i8* %a0, <4 x float> %a1) {
> +define <4 x float> @test_x86_avx_maskload_ps(i8* %a0, <4 x i32> %mask) {
> ; CHECK-LABEL: test_x86_avx_maskload_ps:
> ; CHECK: ## BB#0:
> ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
> ; CHECK-NEXT: vmaskmovps (%eax), %xmm0, %xmm0
> ; CHECK-NEXT: retl
> - %res = call <4 x float> @llvm.x86.avx.maskload.ps(i8* %a0, <4 x float>
> %a1) ; <<4 x float>> [#uses=1]
> + %res = call <4 x float> @llvm.x86.avx.maskload.ps(i8* %a0, <4 x i32>
> %mask) ; <<4 x float>> [#uses=1]
> ret <4 x float> %res
> }
> -declare <4 x float> @llvm.x86.avx.maskload.ps(i8*, <4 x float>) nounwind
> readonly
> +declare <4 x float> @llvm.x86.avx.maskload.ps(i8*, <4 x i32>) nounwind
> readonly
>
>
> -define <8 x float> @test_x86_avx_maskload_ps_256(i8* %a0, <8 x float>
> %a1) {
> +define <8 x float> @test_x86_avx_maskload_ps_256(i8* %a0, <8 x i32>
> %mask) {
> ; CHECK-LABEL: test_x86_avx_maskload_ps_256:
> ; CHECK: ## BB#0:
> ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
> ; CHECK-NEXT: vmaskmovps (%eax), %ymm0, %ymm0
> ; CHECK-NEXT: retl
> - %res = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %a0, <8 x
> float> %a1) ; <<8 x float>> [#uses=1]
> + %res = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %a0, <8 x
> i32> %mask) ; <<8 x float>> [#uses=1]
> ret <8 x float> %res
> }
> -declare <8 x float> @llvm.x86.avx.maskload.ps.256(i8*, <8 x float>)
> nounwind readonly
> +declare <8 x float> @llvm.x86.avx.maskload.ps.256(i8*, <8 x i32>)
> nounwind readonly
>
>
> -define void @test_x86_avx_maskstore_pd(i8* %a0, <2 x double> %a1, <2 x
> double> %a2) {
> +define void @test_x86_avx_maskstore_pd(i8* %a0, <2 x i64> %mask, <2 x
> double> %a2) {
> ; CHECK-LABEL: test_x86_avx_maskstore_pd:
> ; CHECK: ## BB#0:
> ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
> ; CHECK-NEXT: vmaskmovpd %xmm1, %xmm0, (%eax)
> ; CHECK-NEXT: retl
> - call void @llvm.x86.avx.maskstore.pd(i8* %a0, <2 x double> %a1, <2 x
> double> %a2)
> + call void @llvm.x86.avx.maskstore.pd(i8* %a0, <2 x i64> %mask, <2 x
> double> %a2)
> ret void
> }
> -declare void @llvm.x86.avx.maskstore.pd(i8*, <2 x double>, <2 x double>)
> nounwind
> +declare void @llvm.x86.avx.maskstore.pd(i8*, <2 x i64>, <2 x double>)
> nounwind
>
>
> -define void @test_x86_avx_maskstore_pd_256(i8* %a0, <4 x double> %a1, <4
> x double> %a2) {
> +define void @test_x86_avx_maskstore_pd_256(i8* %a0, <4 x i64> %mask, <4 x
> double> %a2) {
> ; CHECK-LABEL: test_x86_avx_maskstore_pd_256:
> ; CHECK: ## BB#0:
> ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
> ; CHECK-NEXT: vmaskmovpd %ymm1, %ymm0, (%eax)
> ; CHECK-NEXT: vzeroupper
> ; CHECK-NEXT: retl
> - call void @llvm.x86.avx.maskstore.pd.256(i8* %a0, <4 x double> %a1, <4
> x double> %a2)
> + call void @llvm.x86.avx.maskstore.pd.256(i8* %a0, <4 x i64> %mask, <4 x
> double> %a2)
> ret void
> }
> -declare void @llvm.x86.avx.maskstore.pd.256(i8*, <4 x double>, <4 x
> double>) nounwind
> +declare void @llvm.x86.avx.maskstore.pd.256(i8*, <4 x i64>, <4 x double>)
> nounwind
>
>
> -define void @test_x86_avx_maskstore_ps(i8* %a0, <4 x float> %a1, <4 x
> float> %a2) {
> +define void @test_x86_avx_maskstore_ps(i8* %a0, <4 x i32> %mask, <4 x
> float> %a2) {
> ; CHECK-LABEL: test_x86_avx_maskstore_ps:
> ; CHECK: ## BB#0:
> ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
> ; CHECK-NEXT: vmaskmovps %xmm1, %xmm0, (%eax)
> ; CHECK-NEXT: retl
> - call void @llvm.x86.avx.maskstore.ps(i8* %a0, <4 x float> %a1, <4 x
> float> %a2)
> + call void @llvm.x86.avx.maskstore.ps(i8* %a0, <4 x i32> %mask, <4 x
> float> %a2)
> ret void
> }
> -declare void @llvm.x86.avx.maskstore.ps(i8*, <4 x float>, <4 x float>)
> nounwind
> +declare void @llvm.x86.avx.maskstore.ps(i8*, <4 x i32>, <4 x float>)
> nounwind
>
>
> -define void @test_x86_avx_maskstore_ps_256(i8* %a0, <8 x float> %a1, <8 x
> float> %a2) {
> +define void @test_x86_avx_maskstore_ps_256(i8* %a0, <8 x i32> %mask, <8 x
> float> %a2) {
> ; CHECK-LABEL: test_x86_avx_maskstore_ps_256:
> ; CHECK: ## BB#0:
> ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
> ; CHECK-NEXT: vmaskmovps %ymm1, %ymm0, (%eax)
> ; CHECK-NEXT: vzeroupper
> ; CHECK-NEXT: retl
> - call void @llvm.x86.avx.maskstore.ps.256(i8* %a0, <8 x float> %a1, <8
> x float> %a2)
> + call void @llvm.x86.avx.maskstore.ps.256(i8* %a0, <8 x i32> %mask, <8
> x float> %a2)
> ret void
> }
> -declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x float>, <8 x
> float>) nounwind
> +declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x i32>, <8 x float>)
> nounwind
>
>
> define <4 x double> @test_x86_avx_max_pd_256(<4 x double> %a0, <4 x
> double> %a1) {
>
> Modified: llvm/trunk/test/CodeGen/X86/avx-load-store.ll
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-load-store.ll?rev=250817&r1=250816&r2=250817&view=diff
>
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/avx-load-store.ll (original)
> +++ llvm/trunk/test/CodeGen/X86/avx-load-store.ll Tue Oct 20 06:20:13 2015
> @@ -88,7 +88,7 @@ entry:
> ret void
> }
>
> -declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x float>, <8 x
> float>) nounwind
> +declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x i32>, <8 x float>)
> nounwind
>
> ; CHECK_O0: _f_f
> ; CHECK-O0: vmovss LCPI
> @@ -105,7 +105,7 @@ cif_mask_mixed:
> br i1 undef, label %cif_mixed_test_all, label %cif_mixed_test_any_check
>
> cif_mixed_test_all: ; preds =
> %cif_mask_mixed
> - call void @llvm.x86.avx.maskstore.ps.256(i8* undef, <8 x float> <float
> 0xFFFFFFFFE0000000, float 0.000000e+00, float 0.000000e+00, float
> 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00,
> float 0.000000e+00>, <8 x float> undef) nounwind
> + call void @llvm.x86.avx.maskstore.ps.256(i8* undef, <8 x i32> <i32 -1,
> i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, <8 x float> undef)
> nounwind
> unreachable
>
> cif_mixed_test_any_check: ; preds =
> %cif_mask_mixed
>
> Modified: llvm/trunk/test/CodeGen/X86/avx-win64.ll
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-win64.ll?rev=250817&r1=250816&r2=250817&view=diff
>
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/avx-win64.ll (original)
> +++ llvm/trunk/test/CodeGen/X86/avx-win64.ll Tue Oct 20 06:20:13 2015
> @@ -42,6 +42,4 @@ safe_if_after_false:
> }
>
> declare i32 @llvm.x86.avx.movmsk.ps.256(<8 x float>) nounwind readnone
> -declare <8 x float> @llvm.x86.avx.maskload.ps.256(i8*, <8 x float>)
> nounwind readonly
> -declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x float>, <8 x
> float>) nounwind
> declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>,
> <8 x float>) nounwind readnone
>
>