[llvm] [X86] Add test coverage for the concatenation of ISD::FROUND intrinsics (PR #170166)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Dec 1 08:35:35 PST 2025
llvmbot wrote:
@llvm/pr-subscribers-backend-x86
Author: Simon Pilgrim (RKSimon)
Changes:
These tests were missed in #170160
---
Patch is 20.14 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/170166.diff
1 file affected:
- (added) llvm/test/CodeGen/X86/combine-fround.ll (+425)
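
For context, every test in the new file has the same shape: call llvm.round on two or four narrow vectors, then concatenate the results with a shufflevector into a wider vector. The first test from the patch, reduced to its IR:

```llvm
; Round each 128-bit half, then concatenate into a 256-bit result; the
; CHECK lines capture whether the backend merges the two rounds into one
; wider operation.
define <4 x double> @concat_round_v4f64_v2f64(<2 x double> %a0, <2 x double> %a1) {
  %v0 = call <2 x double> @llvm.round.v2f64(<2 x double> %a0)
  %v1 = call <2 x double> @llvm.round.v2f64(<2 x double> %a1)
  %res = shufflevector <2 x double> %v0, <2 x double> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ret <4 x double> %res
}
```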
``````````diff
diff --git a/llvm/test/CodeGen/X86/combine-fround.ll b/llvm/test/CodeGen/X86/combine-fround.ll
new file mode 100644
index 0000000000000..44865544ecb00
--- /dev/null
+++ b/llvm/test/CodeGen/X86/combine-fround.ll
@@ -0,0 +1,425 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=sandybridge | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512
+
+define <4 x double> @concat_round_v4f64_v2f64(<2 x double> %a0, <2 x double> %a1) {
+; SSE-LABEL: concat_round_v4f64_v2f64:
+; SSE: # %bb.0:
+; SSE-NEXT: movapd {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
+; SSE-NEXT: movapd %xmm0, %xmm3
+; SSE-NEXT: andpd %xmm2, %xmm3
+; SSE-NEXT: movapd {{.*#+}} xmm4 = [4.9999999999999994E-1,4.9999999999999994E-1]
+; SSE-NEXT: orpd %xmm4, %xmm3
+; SSE-NEXT: addpd %xmm0, %xmm3
+; SSE-NEXT: roundpd $11, %xmm3, %xmm0
+; SSE-NEXT: andpd %xmm1, %xmm2
+; SSE-NEXT: orpd %xmm4, %xmm2
+; SSE-NEXT: addpd %xmm1, %xmm2
+; SSE-NEXT: roundpd $11, %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: concat_round_v4f64_v2f64:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vmovddup {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
+; AVX1OR2-NEXT: # xmm2 = mem[0,0]
+; AVX1OR2-NEXT: vandpd %xmm2, %xmm0, %xmm3
+; AVX1OR2-NEXT: vmovddup {{.*#+}} xmm4 = [4.9999999999999994E-1,4.9999999999999994E-1]
+; AVX1OR2-NEXT: # xmm4 = mem[0,0]
+; AVX1OR2-NEXT: vorpd %xmm4, %xmm3, %xmm3
+; AVX1OR2-NEXT: vaddpd %xmm3, %xmm0, %xmm0
+; AVX1OR2-NEXT: vandpd %xmm2, %xmm1, %xmm2
+; AVX1OR2-NEXT: vorpd %xmm4, %xmm2, %xmm2
+; AVX1OR2-NEXT: vaddpd %xmm2, %xmm1, %xmm1
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vroundpd $11, %ymm0, %ymm0
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_round_v4f64_v2f64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512-NEXT: vpbroadcastq {{.*#+}} ymm2 = [4602678819172646911,4602678819172646911,4602678819172646911,4602678819172646911]
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm2 | (ymm0 & m64bcst)
+; AVX512-NEXT: vaddpd %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vroundpd $11, %ymm0, %ymm0
+; AVX512-NEXT: retq
+ %v0 = call <2 x double> @llvm.round.v2f64(<2 x double> %a0)
+ %v1 = call <2 x double> @llvm.round.v2f64(<2 x double> %a1)
+ %res = shufflevector <2 x double> %v0, <2 x double> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x double> %res
+}
+
+define <8 x float> @concat_round_v8f32_v4f32(<4 x float> %a0, <4 x float> %a1) {
+; SSE-LABEL: concat_round_v8f32_v4f32:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; SSE-NEXT: movaps %xmm0, %xmm3
+; SSE-NEXT: andps %xmm2, %xmm3
+; SSE-NEXT: movaps {{.*#+}} xmm4 = [4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1]
+; SSE-NEXT: orps %xmm4, %xmm3
+; SSE-NEXT: addps %xmm0, %xmm3
+; SSE-NEXT: roundps $11, %xmm3, %xmm0
+; SSE-NEXT: andps %xmm1, %xmm2
+; SSE-NEXT: orps %xmm4, %xmm2
+; SSE-NEXT: addps %xmm1, %xmm2
+; SSE-NEXT: roundps $11, %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: concat_round_v8f32_v4f32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vbroadcastss {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; AVX1OR2-NEXT: vandps %xmm2, %xmm0, %xmm3
+; AVX1OR2-NEXT: vbroadcastss {{.*#+}} xmm4 = [4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1]
+; AVX1OR2-NEXT: vorps %xmm4, %xmm3, %xmm3
+; AVX1OR2-NEXT: vaddps %xmm3, %xmm0, %xmm0
+; AVX1OR2-NEXT: vandps %xmm2, %xmm1, %xmm2
+; AVX1OR2-NEXT: vorps %xmm4, %xmm2, %xmm2
+; AVX1OR2-NEXT: vaddps %xmm2, %xmm1, %xmm1
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vroundps $11, %ymm0, %ymm0
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_round_v8f32_v4f32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512-NEXT: vpbroadcastd {{.*#+}} ymm2 = [1056964607,1056964607,1056964607,1056964607,1056964607,1056964607,1056964607,1056964607]
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vpternlogd {{.*#+}} ymm2 = ymm2 | (ymm0 & m32bcst)
+; AVX512-NEXT: vaddps %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vroundps $11, %ymm0, %ymm0
+; AVX512-NEXT: retq
+ %v0 = call <4 x float> @llvm.round.v4f32(<4 x float> %a0)
+ %v1 = call <4 x float> @llvm.round.v4f32(<4 x float> %a1)
+ %res = shufflevector <4 x float> %v0, <4 x float> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x float> %res
+}
+
+define <8 x double> @concat_round_v8f64_v2f64(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> %a3) {
+; SSE-LABEL: concat_round_v8f64_v2f64:
+; SSE: # %bb.0:
+; SSE-NEXT: movapd {{.*#+}} xmm4 = [-0.0E+0,-0.0E+0]
+; SSE-NEXT: movapd %xmm0, %xmm5
+; SSE-NEXT: andpd %xmm4, %xmm5
+; SSE-NEXT: movapd {{.*#+}} xmm6 = [4.9999999999999994E-1,4.9999999999999994E-1]
+; SSE-NEXT: orpd %xmm6, %xmm5
+; SSE-NEXT: addpd %xmm0, %xmm5
+; SSE-NEXT: roundpd $11, %xmm5, %xmm0
+; SSE-NEXT: movapd %xmm1, %xmm5
+; SSE-NEXT: andpd %xmm4, %xmm5
+; SSE-NEXT: orpd %xmm6, %xmm5
+; SSE-NEXT: addpd %xmm1, %xmm5
+; SSE-NEXT: roundpd $11, %xmm5, %xmm1
+; SSE-NEXT: movapd %xmm2, %xmm5
+; SSE-NEXT: andpd %xmm4, %xmm5
+; SSE-NEXT: orpd %xmm6, %xmm5
+; SSE-NEXT: addpd %xmm2, %xmm5
+; SSE-NEXT: roundpd $11, %xmm5, %xmm2
+; SSE-NEXT: andpd %xmm3, %xmm4
+; SSE-NEXT: orpd %xmm6, %xmm4
+; SSE-NEXT: addpd %xmm3, %xmm4
+; SSE-NEXT: roundpd $11, %xmm4, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: concat_round_v8f64_v2f64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovddup {{.*#+}} xmm4 = [-0.0E+0,-0.0E+0]
+; AVX1-NEXT: # xmm4 = mem[0,0]
+; AVX1-NEXT: vandpd %xmm4, %xmm0, %xmm5
+; AVX1-NEXT: vmovddup {{.*#+}} xmm6 = [4.9999999999999994E-1,4.9999999999999994E-1]
+; AVX1-NEXT: # xmm6 = mem[0,0]
+; AVX1-NEXT: vorpd %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vaddpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandpd %xmm4, %xmm1, %xmm5
+; AVX1-NEXT: vorpd %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vaddpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandpd %xmm4, %xmm2, %xmm5
+; AVX1-NEXT: vorpd %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vaddpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandpd %xmm4, %xmm3, %xmm4
+; AVX1-NEXT: vorpd %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vaddpd %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vroundpd $11, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1
+; AVX1-NEXT: vroundpd $11, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: concat_round_v8f64_v2f64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovddup {{.*#+}} xmm4 = [-0.0E+0,-0.0E+0]
+; AVX2-NEXT: # xmm4 = mem[0,0]
+; AVX2-NEXT: vandpd %xmm4, %xmm0, %xmm5
+; AVX2-NEXT: vmovddup {{.*#+}} xmm6 = [4.9999999999999994E-1,4.9999999999999994E-1]
+; AVX2-NEXT: # xmm6 = mem[0,0]
+; AVX2-NEXT: vorpd %xmm6, %xmm5, %xmm5
+; AVX2-NEXT: vaddpd %xmm5, %xmm0, %xmm0
+; AVX2-NEXT: vandpd %xmm4, %xmm1, %xmm5
+; AVX2-NEXT: vorpd %xmm6, %xmm5, %xmm5
+; AVX2-NEXT: vaddpd %xmm5, %xmm1, %xmm1
+; AVX2-NEXT: vandpd %xmm4, %xmm2, %xmm5
+; AVX2-NEXT: vorpd %xmm6, %xmm5, %xmm5
+; AVX2-NEXT: vaddpd %xmm5, %xmm2, %xmm2
+; AVX2-NEXT: vandpd %xmm4, %xmm3, %xmm4
+; AVX2-NEXT: vorpd %xmm6, %xmm4, %xmm4
+; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vroundpd $11, %ymm0, %ymm0
+; AVX2-NEXT: vaddpd %xmm4, %xmm3, %xmm1
+; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX2-NEXT: vroundpd $11, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: concat_round_v8f64_v2f64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vpbroadcastq {{.*#+}} zmm1 = [4602678819172646911,4602678819172646911,4602678819172646911,4602678819172646911,4602678819172646911,4602678819172646911,4602678819172646911,4602678819172646911]
+; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 | (zmm0 & m64bcst)
+; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vrndscalepd $11, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <2 x double> @llvm.round.v2f64(<2 x double> %a0)
+ %v1 = call <2 x double> @llvm.round.v2f64(<2 x double> %a1)
+ %v2 = call <2 x double> @llvm.round.v2f64(<2 x double> %a2)
+ %v3 = call <2 x double> @llvm.round.v2f64(<2 x double> %a3)
+ %r01 = shufflevector <2 x double> %v0, <2 x double> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %r23 = shufflevector <2 x double> %v2, <2 x double> %v3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %res = shufflevector <4 x double> %r01, <4 x double> %r23, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x double> %res
+}
+
+define <16 x float> @concat_round_v16f32_v4f32(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> %a3) {
+; SSE-LABEL: concat_round_v16f32_v4f32:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps {{.*#+}} xmm4 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; SSE-NEXT: movaps %xmm0, %xmm5
+; SSE-NEXT: andps %xmm4, %xmm5
+; SSE-NEXT: movaps {{.*#+}} xmm6 = [4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1]
+; SSE-NEXT: orps %xmm6, %xmm5
+; SSE-NEXT: addps %xmm0, %xmm5
+; SSE-NEXT: roundps $11, %xmm5, %xmm0
+; SSE-NEXT: movaps %xmm1, %xmm5
+; SSE-NEXT: andps %xmm4, %xmm5
+; SSE-NEXT: orps %xmm6, %xmm5
+; SSE-NEXT: addps %xmm1, %xmm5
+; SSE-NEXT: roundps $11, %xmm5, %xmm1
+; SSE-NEXT: movaps %xmm2, %xmm5
+; SSE-NEXT: andps %xmm4, %xmm5
+; SSE-NEXT: orps %xmm6, %xmm5
+; SSE-NEXT: addps %xmm2, %xmm5
+; SSE-NEXT: roundps $11, %xmm5, %xmm2
+; SSE-NEXT: andps %xmm3, %xmm4
+; SSE-NEXT: orps %xmm6, %xmm4
+; SSE-NEXT: addps %xmm3, %xmm4
+; SSE-NEXT: roundps $11, %xmm4, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: concat_round_v16f32_v4f32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; AVX1-NEXT: vandps %xmm4, %xmm0, %xmm5
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1]
+; AVX1-NEXT: vorps %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vaddps %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm4, %xmm1, %xmm5
+; AVX1-NEXT: vorps %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vaddps %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandps %xmm4, %xmm2, %xmm5
+; AVX1-NEXT: vorps %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vaddps %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm4, %xmm3, %xmm4
+; AVX1-NEXT: vorps %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vaddps %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vroundps $11, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1
+; AVX1-NEXT: vroundps $11, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: concat_round_v16f32_v4f32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vbroadcastss {{.*#+}} xmm4 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; AVX2-NEXT: vandps %xmm4, %xmm0, %xmm5
+; AVX2-NEXT: vbroadcastss {{.*#+}} xmm6 = [4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1]
+; AVX2-NEXT: vorps %xmm6, %xmm5, %xmm5
+; AVX2-NEXT: vaddps %xmm5, %xmm0, %xmm0
+; AVX2-NEXT: vandps %xmm4, %xmm1, %xmm5
+; AVX2-NEXT: vorps %xmm6, %xmm5, %xmm5
+; AVX2-NEXT: vaddps %xmm5, %xmm1, %xmm1
+; AVX2-NEXT: vandps %xmm4, %xmm2, %xmm5
+; AVX2-NEXT: vorps %xmm6, %xmm5, %xmm5
+; AVX2-NEXT: vaddps %xmm5, %xmm2, %xmm2
+; AVX2-NEXT: vandps %xmm4, %xmm3, %xmm4
+; AVX2-NEXT: vorps %xmm6, %xmm4, %xmm4
+; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vroundps $11, %ymm0, %ymm0
+; AVX2-NEXT: vaddps %xmm4, %xmm3, %xmm1
+; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX2-NEXT: vroundps $11, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: concat_round_v16f32_v4f32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vpbroadcastd {{.*#+}} zmm1 = [1056964607,1056964607,1056964607,1056964607,1056964607,1056964607,1056964607,1056964607,1056964607,1056964607,1056964607,1056964607,1056964607,1056964607,1056964607,1056964607]
+; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm1 = zmm1 | (zmm0 & m32bcst)
+; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vrndscaleps $11, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <4 x float> @llvm.round.v4f32(<4 x float> %a0)
+ %v1 = call <4 x float> @llvm.round.v4f32(<4 x float> %a1)
+ %v2 = call <4 x float> @llvm.round.v4f32(<4 x float> %a2)
+ %v3 = call <4 x float> @llvm.round.v4f32(<4 x float> %a3)
+ %r01 = shufflevector <4 x float> %v0, <4 x float> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %r23 = shufflevector <4 x float> %v2, <4 x float> %v3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %res = shufflevector <8 x float> %r01, <8 x float> %r23, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x float> %res
+}
+
+define <8 x double> @concat_round_v8f64_v4f64(<4 x double> %a0, <4 x double> %a1) {
+; SSE-LABEL: concat_round_v8f64_v4f64:
+; SSE: # %bb.0:
+; SSE-NEXT: movapd {{.*#+}} xmm4 = [-0.0E+0,-0.0E+0]
+; SSE-NEXT: movapd %xmm0, %xmm5
+; SSE-NEXT: andpd %xmm4, %xmm5
+; SSE-NEXT: movapd {{.*#+}} xmm6 = [4.9999999999999994E-1,4.9999999999999994E-1]
+; SSE-NEXT: orpd %xmm6, %xmm5
+; SSE-NEXT: addpd %xmm0, %xmm5
+; SSE-NEXT: roundpd $11, %xmm5, %xmm0
+; SSE-NEXT: movapd %xmm1, %xmm5
+; SSE-NEXT: andpd %xmm4, %xmm5
+; SSE-NEXT: orpd %xmm6, %xmm5
+; SSE-NEXT: addpd %xmm1, %xmm5
+; SSE-NEXT: roundpd $11, %xmm5, %xmm1
+; SSE-NEXT: movapd %xmm2, %xmm5
+; SSE-NEXT: andpd %xmm4, %xmm5
+; SSE-NEXT: orpd %xmm6, %xmm5
+; SSE-NEXT: addpd %xmm2, %xmm5
+; SSE-NEXT: roundpd $11, %xmm5, %xmm2
+; SSE-NEXT: andpd %xmm3, %xmm4
+; SSE-NEXT: orpd %xmm6, %xmm4
+; SSE-NEXT: addpd %xmm3, %xmm4
+; SSE-NEXT: roundpd $11, %xmm4, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: concat_round_v8f64_v4f64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; AVX1-NEXT: vandpd %ymm2, %ymm0, %ymm3
+; AVX1-NEXT: vmovapd {{.*#+}} ymm4 = [4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1]
+; AVX1-NEXT: vorpd %ymm4, %ymm3, %ymm3
+; AVX1-NEXT: vaddpd %ymm3, %ymm0, %ymm0
+; AVX1-NEXT: vroundpd $11, %ymm0, %ymm0
+; AVX1-NEXT: vandpd %ymm2, %ymm1, %ymm2
+; AVX1-NEXT: vorpd %ymm4, %ymm2, %ymm2
+; AVX1-NEXT: vaddpd %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: vroundpd $11, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: concat_round_v8f64_v4f64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; AVX2-NEXT: vandpd %ymm2, %ymm0, %ymm3
+; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm4 = [4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1]
+; AVX2-NEXT: vorpd %ymm4, %ymm3, %ymm3
+; AVX2-NEXT: vaddpd %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vroundpd $11, %ymm0, %ymm0
+; AVX2-NEXT: vandpd %ymm2, %ymm1, %ymm2
+; AVX2-NEXT: vorpd %ymm4, %ymm2, %ymm2
+; AVX2-NEXT: vaddpd %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vroundpd $11, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: concat_round_v8f64_v4f64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512-NEXT: vpbroadcastq {{.*#+}} zmm2 = [4602678819172646911,4602678819172646911,4602678819172646911,4602678819172646911,4602678819172646911,4602678819172646911,4602678819172646911,4602678819172646911]
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 | (zmm0 & m64bcst)
+; AVX512-NEXT: vaddpd %zmm2, %zmm0, %zmm0
+; AVX512-NEXT: vrndscalepd $11, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <4 x double> @llvm.round.v4f64(<4 x double> %a0)
+ %v1 = call <4 x double> @llvm.round.v4f64(<4 x double> %a1)
+ %res = shufflevector <4 x double> %v0, <4 x double> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x double> %res
+}
+
+define <16 x float> @concat_round_v16f32_v8f32(<8 x float> %a0, <8 x float> %a1) {
+; SSE-LABEL: concat_round_v16f32_v8f32:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps {{.*#+}} xmm4 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; SSE-NEXT: movaps %xmm0, %xmm5
+; SSE-NEXT: andps %xmm4, %xmm5
+; SSE-NEXT: movaps {{.*#+}} xmm6 = [4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1]
+; SSE-NEXT: orps %xmm6, %xmm5
+; SSE-NEXT: addps %xmm0, %xmm5
+; SSE-NEXT: roundps $11, %xmm5, %xmm0
+; SSE-NEXT: movaps %xmm1, %xmm5
+; SSE-NEXT: andps %xmm4, %xmm5
+; SSE-NEXT: orps %xmm6, %xmm5
+; SSE-NEXT: addps %xmm1, %xmm5
+; SSE-NEXT: roundps $11, %xmm5, %xmm1
+; SSE-NEXT: movaps %xmm2, %xmm5
+; SSE-NEXT: andps %xmm4, %xmm5
+; SSE-NEXT: orps %xmm6, %xmm5
+; SSE-NEXT: addps %xmm2, %xmm5
+; SSE-NEXT: roundps $11, %xmm5, %xmm2
+; SSE-NEXT: andps %xmm3, %xmm4
+; SSE-NEXT: orps %xmm6, %xmm4
+; SSE-NEXT: addps %xmm3, %xmm4
+; SSE-NEXT: roundps $11, %xmm4, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: concat_round_v16f32_v8f32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm3
+; AVX1-NEXT: vmovaps {{.*#+}} ymm4 = [4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1]
+; AVX1-NEXT: vorps %ymm4, %ymm3, %ymm3
+; AVX1-NEXT: vaddps %ymm3, %ymm0, %ymm0
+; AVX1-NEXT: vroundps $11, %ymm0, %ymm0
+; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm2
+; AVX1-NEXT: vorps %ymm4, %ymm2, %ymm2
+; AVX1-NEXT: vaddps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: vroundps $11, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: concat_round_v16f32_v8f32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vbroadcastss {{.*#+}} ymm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; AVX2-NEXT: vandps %ymm2, %ymm0, %ymm3
+; AVX2-NEXT: vbroadcastss {{.*#+}} ymm4 = [4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1]
+; AVX2-NEXT: vorps %ymm4, %ymm3, %ymm3
+; AVX2-NEXT: vaddps %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vroundps $11, %ymm0, %ymm0
+; AVX2-NEXT: vandps %ymm2, %ymm1, %ymm2
+; AVX2-NEXT: vorps %ymm4, %ymm2, %ymm2
+; AVX2-NEXT: vaddps %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vroundps $11, %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: concat_round_v16f32_v8f32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512-NEXT: vpbroadcastd {{.*#+}} zmm2 = [1056964607,1056964607,1056964607,1056964607,1056964607,1056964607,1056964607,1056964607,1056964607,1056964607,1056964607,1056964607,1056964607,1056964607,1056964607,1056964607]
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm2 = zmm2 | (zmm0 & m32bcst)
+; AVX512-NEXT: vaddps %zmm2, %zmm0, %zmm0
+; AVX512-NEXT: vrndscaleps $11, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <8 x float> @llvm.round.v8f32(<8 x float> %a0)
+ %v1 = call <8 x float> @llvm.round.v8f32(<8 x float> %a1)
+ %res = shufflevector <8 x float> %v0, <8 x float> %v1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+...
[truncated]
``````````
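
A note for readers of the CHECK lines: x86 has no single instruction for llvm.round's round-half-away-from-zero semantics, so the backend expands it (roughly) as x + copysign(c, x) followed by a truncating round, where c is the largest value strictly below 0.5. That is where the magic constants come from: 4602678819172646911 is the f64 bit pattern 0x3FDFFFFFFFFFFFFF (4.9999999999999994E-1), and 1056964607 is the f32 pattern 0x3EFFFFFF (4.9999997E-1). The and/or against -0.0E+0 implements the copysign, and the round immediate $11 selects round-toward-zero with the inexact exception suppressed.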
https://github.com/llvm/llvm-project/pull/170166
More information about the llvm-commits mailing list