[llvm] [X86] Add tests showing failure to concat fp rounding intrinsics together. (PR #170108)
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Mon Dec 1 03:36:50 PST 2025
https://github.com/RKSimon created https://github.com/llvm/llvm-project/pull/170108
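These tests round each narrow subvector first and only concatenate the results afterwards, so the rounding is emitted once per 128-bit half instead of once on the wider vector. All of the intrinsics covered here operate lanewise, so hoisting the shuffle above the calls preserves semantics. As a sketch of the fold the tests are set up to exercise (an assumption based on the whole-ymm rounds already produced for the v4f64/v8f32 inputs below; the checked-in assertions still show the unfolded form), concat_ceil_v4f64_v2f64 could canonicalize to:

  %c = shufflevector <2 x double> %a0, <2 x double> %a1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %r = call <4 x double> @llvm.ceil.v4f64(<4 x double> %c)

which on AVX would then lower to a single rounding instruction:

  vinsertf128 $1, %xmm1, %ymm0, %ymm0
  vroundpd $10, %ymm0, %ymm0 # imm 10 = ceil (round toward +inf, precision exceptions suppressed)
  retq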
From 4bd90bba86fa42b499537d04b8688cab9d3c056e Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Mon, 1 Dec 2025 11:35:57 +0000
Subject: [PATCH] [X86] Add tests showing failure to concat fp rounding
intrinsics together.
---
llvm/test/CodeGen/X86/combine-fceil.ll | 175 ++++++++++++++++++++
llvm/test/CodeGen/X86/combine-fnearbyint.ll | 175 ++++++++++++++++++++
llvm/test/CodeGen/X86/combine-frint.ll | 175 ++++++++++++++++++++
llvm/test/CodeGen/X86/combine-froundeven.ll | 175 ++++++++++++++++++++
llvm/test/CodeGen/X86/combine-ftrunc.ll | 175 ++++++++++++++++++++
llvm/test/CodeGen/X86/combine-rndscale.ll | 144 ++++++++++++++++
6 files changed, 1019 insertions(+)
create mode 100644 llvm/test/CodeGen/X86/combine-fceil.ll
create mode 100644 llvm/test/CodeGen/X86/combine-fnearbyint.ll
create mode 100644 llvm/test/CodeGen/X86/combine-frint.ll
create mode 100644 llvm/test/CodeGen/X86/combine-froundeven.ll
create mode 100644 llvm/test/CodeGen/X86/combine-ftrunc.ll
create mode 100644 llvm/test/CodeGen/X86/combine-rndscale.ll
diff --git a/llvm/test/CodeGen/X86/combine-fceil.ll b/llvm/test/CodeGen/X86/combine-fceil.ll
new file mode 100644
index 0000000000000..78f1476a49152
--- /dev/null
+++ b/llvm/test/CodeGen/X86/combine-fceil.ll
@@ -0,0 +1,175 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=sandybridge | FileCheck %s --check-prefixes=AVX,AVX1OR2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX,AVX1OR2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512
+
+define <4 x double> @concat_ceil_v4f64_v2f64(<2 x double> %a0, <2 x double> %a1) {
+; SSE-LABEL: concat_ceil_v4f64_v2f64:
+; SSE: # %bb.0:
+; SSE-NEXT: roundpd $10, %xmm0, %xmm0
+; SSE-NEXT: roundpd $10, %xmm1, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: concat_ceil_v4f64_v2f64:
+; AVX: # %bb.0:
+; AVX-NEXT: vroundpd $10, %xmm0, %xmm0
+; AVX-NEXT: vroundpd $10, %xmm1, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %v0 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %a0)
+ %v1 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %a1)
+ %res = shufflevector <2 x double> %v0, <2 x double> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x double> %res
+}
+
+define <8 x float> @concat_ceil_v8f32_v4f32(<4 x float> %a0, <4 x float> %a1) {
+; SSE-LABEL: concat_ceil_v8f32_v4f32:
+; SSE: # %bb.0:
+; SSE-NEXT: roundps $10, %xmm0, %xmm0
+; SSE-NEXT: roundps $10, %xmm1, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: concat_ceil_v8f32_v4f32:
+; AVX: # %bb.0:
+; AVX-NEXT: vroundps $10, %xmm0, %xmm0
+; AVX-NEXT: vroundps $10, %xmm1, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %v0 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %a0)
+ %v1 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %a1)
+ %res = shufflevector <4 x float> %v0, <4 x float> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x float> %res
+}
+
+define <8 x double> @concat_ceil_v8f64_v2f64(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> %a3) {
+; SSE-LABEL: concat_ceil_v8f64_v2f64:
+; SSE: # %bb.0:
+; SSE-NEXT: roundpd $10, %xmm0, %xmm0
+; SSE-NEXT: roundpd $10, %xmm1, %xmm1
+; SSE-NEXT: roundpd $10, %xmm2, %xmm2
+; SSE-NEXT: roundpd $10, %xmm3, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: concat_ceil_v8f64_v2f64:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vroundpd $10, %xmm0, %xmm0
+; AVX1OR2-NEXT: vroundpd $10, %xmm1, %xmm1
+; AVX1OR2-NEXT: vroundpd $10, %xmm2, %xmm2
+; AVX1OR2-NEXT: vroundpd $10, %xmm3, %xmm3
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_ceil_v8f64_v2f64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vroundpd $10, %xmm0, %xmm0
+; AVX512-NEXT: vroundpd $10, %xmm1, %xmm1
+; AVX512-NEXT: vroundpd $10, %xmm2, %xmm2
+; AVX512-NEXT: vroundpd $10, %xmm3, %xmm3
+; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %a0)
+ %v1 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %a1)
+ %v2 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %a2)
+ %v3 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %a3)
+ %r01 = shufflevector <2 x double> %v0, <2 x double> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %r23 = shufflevector <2 x double> %v2, <2 x double> %v3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %res = shufflevector <4 x double> %r01, <4 x double> %r23, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x double> %res
+}
+
+define <16 x float> @concat_ceil_v16f32_v4f32(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> %a3) {
+; SSE-LABEL: concat_ceil_v16f32_v4f32:
+; SSE: # %bb.0:
+; SSE-NEXT: roundps $10, %xmm0, %xmm0
+; SSE-NEXT: roundps $10, %xmm1, %xmm1
+; SSE-NEXT: roundps $10, %xmm2, %xmm2
+; SSE-NEXT: roundps $10, %xmm3, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: concat_ceil_v16f32_v4f32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vroundps $10, %xmm0, %xmm0
+; AVX1OR2-NEXT: vroundps $10, %xmm1, %xmm1
+; AVX1OR2-NEXT: vroundps $10, %xmm2, %xmm2
+; AVX1OR2-NEXT: vroundps $10, %xmm3, %xmm3
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_ceil_v16f32_v4f32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vroundps $10, %xmm0, %xmm0
+; AVX512-NEXT: vroundps $10, %xmm1, %xmm1
+; AVX512-NEXT: vroundps $10, %xmm2, %xmm2
+; AVX512-NEXT: vroundps $10, %xmm3, %xmm3
+; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %a0)
+ %v1 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %a1)
+ %v2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %a2)
+ %v3 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %a3)
+ %r01 = shufflevector <4 x float> %v0, <4 x float> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %r23 = shufflevector <4 x float> %v2, <4 x float> %v3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %res = shufflevector <8 x float> %r01, <8 x float> %r23, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x float> %res
+}
+
+define <8 x double> @concat_ceil_v8f64_v4f64(<4 x double> %a0, <4 x double> %a1) {
+; SSE-LABEL: concat_ceil_v8f64_v4f64:
+; SSE: # %bb.0:
+; SSE-NEXT: roundpd $10, %xmm0, %xmm0
+; SSE-NEXT: roundpd $10, %xmm1, %xmm1
+; SSE-NEXT: roundpd $10, %xmm2, %xmm2
+; SSE-NEXT: roundpd $10, %xmm3, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: concat_ceil_v8f64_v4f64:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vroundpd $10, %ymm0, %ymm0
+; AVX1OR2-NEXT: vroundpd $10, %ymm1, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_ceil_v8f64_v4f64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vroundpd $10, %ymm0, %ymm0
+; AVX512-NEXT: vroundpd $10, %ymm1, %ymm1
+; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <4 x double> @llvm.ceil.v4f64(<4 x double> %a0)
+ %v1 = call <4 x double> @llvm.ceil.v4f64(<4 x double> %a1)
+ %res = shufflevector <4 x double> %v0, <4 x double> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x double> %res
+}
+
+define <16 x float> @concat_ceil_v16f32_v8f32(<8 x float> %a0, <8 x float> %a1) {
+; SSE-LABEL: concat_ceil_v16f32_v8f32:
+; SSE: # %bb.0:
+; SSE-NEXT: roundps $10, %xmm0, %xmm0
+; SSE-NEXT: roundps $10, %xmm1, %xmm1
+; SSE-NEXT: roundps $10, %xmm2, %xmm2
+; SSE-NEXT: roundps $10, %xmm3, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: concat_ceil_v16f32_v8f32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vroundps $10, %ymm0, %ymm0
+; AVX1OR2-NEXT: vroundps $10, %ymm1, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_ceil_v16f32_v8f32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vroundps $10, %ymm0, %ymm0
+; AVX512-NEXT: vroundps $10, %ymm1, %ymm1
+; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <8 x float> @llvm.ceil.v8f32(<8 x float> %a0)
+ %v1 = call <8 x float> @llvm.ceil.v8f32(<8 x float> %a1)
+ %res = shufflevector <8 x float> %v0, <8 x float> %v1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x float> %res
+}
diff --git a/llvm/test/CodeGen/X86/combine-fnearbyint.ll b/llvm/test/CodeGen/X86/combine-fnearbyint.ll
new file mode 100644
index 0000000000000..14d1017aec630
--- /dev/null
+++ b/llvm/test/CodeGen/X86/combine-fnearbyint.ll
@@ -0,0 +1,175 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=sandybridge | FileCheck %s --check-prefixes=AVX,AVX1OR2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX,AVX1OR2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512
+
+define <4 x double> @concat_nearbyint_v4f64_v2f64(<2 x double> %a0, <2 x double> %a1) {
+; SSE-LABEL: concat_nearbyint_v4f64_v2f64:
+; SSE: # %bb.0:
+; SSE-NEXT: roundpd $12, %xmm0, %xmm0
+; SSE-NEXT: roundpd $12, %xmm1, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: concat_nearbyint_v4f64_v2f64:
+; AVX: # %bb.0:
+; AVX-NEXT: vroundpd $12, %xmm0, %xmm0
+; AVX-NEXT: vroundpd $12, %xmm1, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %v0 = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %a0)
+ %v1 = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %a1)
+ %res = shufflevector <2 x double> %v0, <2 x double> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x double> %res
+}
+
+define <8 x float> @concat_nearbyint_v8f32_v4f32(<4 x float> %a0, <4 x float> %a1) {
+; SSE-LABEL: concat_nearbyint_v8f32_v4f32:
+; SSE: # %bb.0:
+; SSE-NEXT: roundps $12, %xmm0, %xmm0
+; SSE-NEXT: roundps $12, %xmm1, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: concat_nearbyint_v8f32_v4f32:
+; AVX: # %bb.0:
+; AVX-NEXT: vroundps $12, %xmm0, %xmm0
+; AVX-NEXT: vroundps $12, %xmm1, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %v0 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %a0)
+ %v1 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %a1)
+ %res = shufflevector <4 x float> %v0, <4 x float> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x float> %res
+}
+
+define <8 x double> @concat_nearbyint_v8f64_v2f64(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> %a3) {
+; SSE-LABEL: concat_nearbyint_v8f64_v2f64:
+; SSE: # %bb.0:
+; SSE-NEXT: roundpd $12, %xmm0, %xmm0
+; SSE-NEXT: roundpd $12, %xmm1, %xmm1
+; SSE-NEXT: roundpd $12, %xmm2, %xmm2
+; SSE-NEXT: roundpd $12, %xmm3, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: concat_nearbyint_v8f64_v2f64:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vroundpd $12, %xmm0, %xmm0
+; AVX1OR2-NEXT: vroundpd $12, %xmm1, %xmm1
+; AVX1OR2-NEXT: vroundpd $12, %xmm2, %xmm2
+; AVX1OR2-NEXT: vroundpd $12, %xmm3, %xmm3
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_nearbyint_v8f64_v2f64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vroundpd $12, %xmm0, %xmm0
+; AVX512-NEXT: vroundpd $12, %xmm1, %xmm1
+; AVX512-NEXT: vroundpd $12, %xmm2, %xmm2
+; AVX512-NEXT: vroundpd $12, %xmm3, %xmm3
+; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %a0)
+ %v1 = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %a1)
+ %v2 = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %a2)
+ %v3 = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %a3)
+ %r01 = shufflevector <2 x double> %v0, <2 x double> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %r23 = shufflevector <2 x double> %v2, <2 x double> %v3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %res = shufflevector <4 x double> %r01, <4 x double> %r23, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x double> %res
+}
+
+define <16 x float> @concat_nearbyint_v16f32_v4f32(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> %a3) {
+; SSE-LABEL: concat_nearbyint_v16f32_v4f32:
+; SSE: # %bb.0:
+; SSE-NEXT: roundps $12, %xmm0, %xmm0
+; SSE-NEXT: roundps $12, %xmm1, %xmm1
+; SSE-NEXT: roundps $12, %xmm2, %xmm2
+; SSE-NEXT: roundps $12, %xmm3, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: concat_nearbyint_v16f32_v4f32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vroundps $12, %xmm0, %xmm0
+; AVX1OR2-NEXT: vroundps $12, %xmm1, %xmm1
+; AVX1OR2-NEXT: vroundps $12, %xmm2, %xmm2
+; AVX1OR2-NEXT: vroundps $12, %xmm3, %xmm3
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_nearbyint_v16f32_v4f32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vroundps $12, %xmm0, %xmm0
+; AVX512-NEXT: vroundps $12, %xmm1, %xmm1
+; AVX512-NEXT: vroundps $12, %xmm2, %xmm2
+; AVX512-NEXT: vroundps $12, %xmm3, %xmm3
+; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %a0)
+ %v1 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %a1)
+ %v2 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %a2)
+ %v3 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %a3)
+ %r01 = shufflevector <4 x float> %v0, <4 x float> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %r23 = shufflevector <4 x float> %v2, <4 x float> %v3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %res = shufflevector <8 x float> %r01, <8 x float> %r23, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x float> %res
+}
+
+define <8 x double> @concat_nearbyint_v8f64_v4f64(<4 x double> %a0, <4 x double> %a1) {
+; SSE-LABEL: concat_nearbyint_v8f64_v4f64:
+; SSE: # %bb.0:
+; SSE-NEXT: roundpd $12, %xmm0, %xmm0
+; SSE-NEXT: roundpd $12, %xmm1, %xmm1
+; SSE-NEXT: roundpd $12, %xmm2, %xmm2
+; SSE-NEXT: roundpd $12, %xmm3, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: concat_nearbyint_v8f64_v4f64:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vroundpd $12, %ymm0, %ymm0
+; AVX1OR2-NEXT: vroundpd $12, %ymm1, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_nearbyint_v8f64_v4f64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vroundpd $12, %ymm0, %ymm0
+; AVX512-NEXT: vroundpd $12, %ymm1, %ymm1
+; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %a0)
+ %v1 = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %a1)
+ %res = shufflevector <4 x double> %v0, <4 x double> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x double> %res
+}
+
+define <16 x float> @concat_nearbyint_v16f32_v8f32(<8 x float> %a0, <8 x float> %a1) {
+; SSE-LABEL: concat_nearbyint_v16f32_v8f32:
+; SSE: # %bb.0:
+; SSE-NEXT: roundps $12, %xmm0, %xmm0
+; SSE-NEXT: roundps $12, %xmm1, %xmm1
+; SSE-NEXT: roundps $12, %xmm2, %xmm2
+; SSE-NEXT: roundps $12, %xmm3, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: concat_nearbyint_v16f32_v8f32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vroundps $12, %ymm0, %ymm0
+; AVX1OR2-NEXT: vroundps $12, %ymm1, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_nearbyint_v16f32_v8f32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vroundps $12, %ymm0, %ymm0
+; AVX512-NEXT: vroundps $12, %ymm1, %ymm1
+; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> %a0)
+ %v1 = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> %a1)
+ %res = shufflevector <8 x float> %v0, <8 x float> %v1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x float> %res
+}
diff --git a/llvm/test/CodeGen/X86/combine-frint.ll b/llvm/test/CodeGen/X86/combine-frint.ll
new file mode 100644
index 0000000000000..901ce2c1f0d82
--- /dev/null
+++ b/llvm/test/CodeGen/X86/combine-frint.ll
@@ -0,0 +1,175 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=sandybridge | FileCheck %s --check-prefixes=AVX,AVX1OR2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX,AVX1OR2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512
+
+define <4 x double> @concat_rint_v4f64_v2f64(<2 x double> %a0, <2 x double> %a1) {
+; SSE-LABEL: concat_rint_v4f64_v2f64:
+; SSE: # %bb.0:
+; SSE-NEXT: roundpd $4, %xmm0, %xmm0
+; SSE-NEXT: roundpd $4, %xmm1, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: concat_rint_v4f64_v2f64:
+; AVX: # %bb.0:
+; AVX-NEXT: vroundpd $4, %xmm0, %xmm0
+; AVX-NEXT: vroundpd $4, %xmm1, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %v0 = call <2 x double> @llvm.rint.v2f64(<2 x double> %a0)
+ %v1 = call <2 x double> @llvm.rint.v2f64(<2 x double> %a1)
+ %res = shufflevector <2 x double> %v0, <2 x double> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x double> %res
+}
+
+define <8 x float> @concat_rint_v8f32_v4f32(<4 x float> %a0, <4 x float> %a1) {
+; SSE-LABEL: concat_rint_v8f32_v4f32:
+; SSE: # %bb.0:
+; SSE-NEXT: roundps $4, %xmm0, %xmm0
+; SSE-NEXT: roundps $4, %xmm1, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: concat_rint_v8f32_v4f32:
+; AVX: # %bb.0:
+; AVX-NEXT: vroundps $4, %xmm0, %xmm0
+; AVX-NEXT: vroundps $4, %xmm1, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %v0 = call <4 x float> @llvm.rint.v4f32(<4 x float> %a0)
+ %v1 = call <4 x float> @llvm.rint.v4f32(<4 x float> %a1)
+ %res = shufflevector <4 x float> %v0, <4 x float> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x float> %res
+}
+
+define <8 x double> @concat_rint_v8f64_v2f64(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> %a3) {
+; SSE-LABEL: concat_rint_v8f64_v2f64:
+; SSE: # %bb.0:
+; SSE-NEXT: roundpd $4, %xmm0, %xmm0
+; SSE-NEXT: roundpd $4, %xmm1, %xmm1
+; SSE-NEXT: roundpd $4, %xmm2, %xmm2
+; SSE-NEXT: roundpd $4, %xmm3, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: concat_rint_v8f64_v2f64:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vroundpd $4, %xmm0, %xmm0
+; AVX1OR2-NEXT: vroundpd $4, %xmm1, %xmm1
+; AVX1OR2-NEXT: vroundpd $4, %xmm2, %xmm2
+; AVX1OR2-NEXT: vroundpd $4, %xmm3, %xmm3
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_rint_v8f64_v2f64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vroundpd $4, %xmm0, %xmm0
+; AVX512-NEXT: vroundpd $4, %xmm1, %xmm1
+; AVX512-NEXT: vroundpd $4, %xmm2, %xmm2
+; AVX512-NEXT: vroundpd $4, %xmm3, %xmm3
+; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <2 x double> @llvm.rint.v2f64(<2 x double> %a0)
+ %v1 = call <2 x double> @llvm.rint.v2f64(<2 x double> %a1)
+ %v2 = call <2 x double> @llvm.rint.v2f64(<2 x double> %a2)
+ %v3 = call <2 x double> @llvm.rint.v2f64(<2 x double> %a3)
+ %r01 = shufflevector <2 x double> %v0, <2 x double> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %r23 = shufflevector <2 x double> %v2, <2 x double> %v3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %res = shufflevector <4 x double> %r01, <4 x double> %r23, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x double> %res
+}
+
+define <16 x float> @concat_rint_v16f32_v4f32(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> %a3) {
+; SSE-LABEL: concat_rint_v16f32_v4f32:
+; SSE: # %bb.0:
+; SSE-NEXT: roundps $4, %xmm0, %xmm0
+; SSE-NEXT: roundps $4, %xmm1, %xmm1
+; SSE-NEXT: roundps $4, %xmm2, %xmm2
+; SSE-NEXT: roundps $4, %xmm3, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: concat_rint_v16f32_v4f32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vroundps $4, %xmm0, %xmm0
+; AVX1OR2-NEXT: vroundps $4, %xmm1, %xmm1
+; AVX1OR2-NEXT: vroundps $4, %xmm2, %xmm2
+; AVX1OR2-NEXT: vroundps $4, %xmm3, %xmm3
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_rint_v16f32_v4f32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vroundps $4, %xmm0, %xmm0
+; AVX512-NEXT: vroundps $4, %xmm1, %xmm1
+; AVX512-NEXT: vroundps $4, %xmm2, %xmm2
+; AVX512-NEXT: vroundps $4, %xmm3, %xmm3
+; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <4 x float> @llvm.rint.v4f32(<4 x float> %a0)
+ %v1 = call <4 x float> @llvm.rint.v4f32(<4 x float> %a1)
+ %v2 = call <4 x float> @llvm.rint.v4f32(<4 x float> %a2)
+ %v3 = call <4 x float> @llvm.rint.v4f32(<4 x float> %a3)
+ %r01 = shufflevector <4 x float> %v0, <4 x float> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %r23 = shufflevector <4 x float> %v2, <4 x float> %v3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %res = shufflevector <8 x float> %r01, <8 x float> %r23, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x float> %res
+}
+
+define <8 x double> @concat_rint_v8f64_v4f64(<4 x double> %a0, <4 x double> %a1) {
+; SSE-LABEL: concat_rint_v8f64_v4f64:
+; SSE: # %bb.0:
+; SSE-NEXT: roundpd $4, %xmm0, %xmm0
+; SSE-NEXT: roundpd $4, %xmm1, %xmm1
+; SSE-NEXT: roundpd $4, %xmm2, %xmm2
+; SSE-NEXT: roundpd $4, %xmm3, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: concat_rint_v8f64_v4f64:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vroundpd $4, %ymm0, %ymm0
+; AVX1OR2-NEXT: vroundpd $4, %ymm1, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_rint_v8f64_v4f64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vroundpd $4, %ymm0, %ymm0
+; AVX512-NEXT: vroundpd $4, %ymm1, %ymm1
+; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <4 x double> @llvm.rint.v4f64(<4 x double> %a0)
+ %v1 = call <4 x double> @llvm.rint.v4f64(<4 x double> %a1)
+ %res = shufflevector <4 x double> %v0, <4 x double> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x double> %res
+}
+
+define <16 x float> @concat_rint_v16f32_v8f32(<8 x float> %a0, <8 x float> %a1) {
+; SSE-LABEL: concat_rint_v16f32_v8f32:
+; SSE: # %bb.0:
+; SSE-NEXT: roundps $4, %xmm0, %xmm0
+; SSE-NEXT: roundps $4, %xmm1, %xmm1
+; SSE-NEXT: roundps $4, %xmm2, %xmm2
+; SSE-NEXT: roundps $4, %xmm3, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: concat_rint_v16f32_v8f32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vroundps $4, %ymm0, %ymm0
+; AVX1OR2-NEXT: vroundps $4, %ymm1, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_rint_v16f32_v8f32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vroundps $4, %ymm0, %ymm0
+; AVX512-NEXT: vroundps $4, %ymm1, %ymm1
+; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <8 x float> @llvm.rint.v8f32(<8 x float> %a0)
+ %v1 = call <8 x float> @llvm.rint.v8f32(<8 x float> %a1)
+ %res = shufflevector <8 x float> %v0, <8 x float> %v1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x float> %res
+}
diff --git a/llvm/test/CodeGen/X86/combine-froundeven.ll b/llvm/test/CodeGen/X86/combine-froundeven.ll
new file mode 100644
index 0000000000000..484e3a9680450
--- /dev/null
+++ b/llvm/test/CodeGen/X86/combine-froundeven.ll
@@ -0,0 +1,175 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=sandybridge | FileCheck %s --check-prefixes=AVX,AVX1OR2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX,AVX1OR2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512
+
+define <4 x double> @concat_roundeven_v4f64_v2f64(<2 x double> %a0, <2 x double> %a1) {
+; SSE-LABEL: concat_roundeven_v4f64_v2f64:
+; SSE: # %bb.0:
+; SSE-NEXT: roundpd $8, %xmm0, %xmm0
+; SSE-NEXT: roundpd $8, %xmm1, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: concat_roundeven_v4f64_v2f64:
+; AVX: # %bb.0:
+; AVX-NEXT: vroundpd $8, %xmm0, %xmm0
+; AVX-NEXT: vroundpd $8, %xmm1, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %v0 = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %a0)
+ %v1 = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %a1)
+ %res = shufflevector <2 x double> %v0, <2 x double> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x double> %res
+}
+
+define <8 x float> @concat_roundeven_v8f32_v4f32(<4 x float> %a0, <4 x float> %a1) {
+; SSE-LABEL: concat_roundeven_v8f32_v4f32:
+; SSE: # %bb.0:
+; SSE-NEXT: roundps $8, %xmm0, %xmm0
+; SSE-NEXT: roundps $8, %xmm1, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: concat_roundeven_v8f32_v4f32:
+; AVX: # %bb.0:
+; AVX-NEXT: vroundps $8, %xmm0, %xmm0
+; AVX-NEXT: vroundps $8, %xmm1, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %v0 = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %a0)
+ %v1 = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %a1)
+ %res = shufflevector <4 x float> %v0, <4 x float> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x float> %res
+}
+
+define <8 x double> @concat_roundeven_v8f64_v2f64(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> %a3) {
+; SSE-LABEL: concat_roundeven_v8f64_v2f64:
+; SSE: # %bb.0:
+; SSE-NEXT: roundpd $8, %xmm0, %xmm0
+; SSE-NEXT: roundpd $8, %xmm1, %xmm1
+; SSE-NEXT: roundpd $8, %xmm2, %xmm2
+; SSE-NEXT: roundpd $8, %xmm3, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: concat_roundeven_v8f64_v2f64:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vroundpd $8, %xmm0, %xmm0
+; AVX1OR2-NEXT: vroundpd $8, %xmm1, %xmm1
+; AVX1OR2-NEXT: vroundpd $8, %xmm2, %xmm2
+; AVX1OR2-NEXT: vroundpd $8, %xmm3, %xmm3
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_roundeven_v8f64_v2f64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vroundpd $8, %xmm0, %xmm0
+; AVX512-NEXT: vroundpd $8, %xmm1, %xmm1
+; AVX512-NEXT: vroundpd $8, %xmm2, %xmm2
+; AVX512-NEXT: vroundpd $8, %xmm3, %xmm3
+; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %a0)
+ %v1 = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %a1)
+ %v2 = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %a2)
+ %v3 = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %a3)
+ %r01 = shufflevector <2 x double> %v0, <2 x double> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %r23 = shufflevector <2 x double> %v2, <2 x double> %v3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %res = shufflevector <4 x double> %r01, <4 x double> %r23, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x double> %res
+}
+
+define <16 x float> @concat_roundeven_v16f32_v4f32(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> %a3) {
+; SSE-LABEL: concat_roundeven_v16f32_v4f32:
+; SSE: # %bb.0:
+; SSE-NEXT: roundps $8, %xmm0, %xmm0
+; SSE-NEXT: roundps $8, %xmm1, %xmm1
+; SSE-NEXT: roundps $8, %xmm2, %xmm2
+; SSE-NEXT: roundps $8, %xmm3, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: concat_roundeven_v16f32_v4f32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vroundps $8, %xmm0, %xmm0
+; AVX1OR2-NEXT: vroundps $8, %xmm1, %xmm1
+; AVX1OR2-NEXT: vroundps $8, %xmm2, %xmm2
+; AVX1OR2-NEXT: vroundps $8, %xmm3, %xmm3
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_roundeven_v16f32_v4f32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vroundps $8, %xmm0, %xmm0
+; AVX512-NEXT: vroundps $8, %xmm1, %xmm1
+; AVX512-NEXT: vroundps $8, %xmm2, %xmm2
+; AVX512-NEXT: vroundps $8, %xmm3, %xmm3
+; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %a0)
+ %v1 = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %a1)
+ %v2 = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %a2)
+ %v3 = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %a3)
+ %r01 = shufflevector <4 x float> %v0, <4 x float> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %r23 = shufflevector <4 x float> %v2, <4 x float> %v3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %res = shufflevector <8 x float> %r01, <8 x float> %r23, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x float> %res
+}
+
+define <8 x double> @concat_roundeven_v8f64_v4f64(<4 x double> %a0, <4 x double> %a1) {
+; SSE-LABEL: concat_roundeven_v8f64_v4f64:
+; SSE: # %bb.0:
+; SSE-NEXT: roundpd $8, %xmm0, %xmm0
+; SSE-NEXT: roundpd $8, %xmm1, %xmm1
+; SSE-NEXT: roundpd $8, %xmm2, %xmm2
+; SSE-NEXT: roundpd $8, %xmm3, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: concat_roundeven_v8f64_v4f64:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vroundpd $8, %ymm0, %ymm0
+; AVX1OR2-NEXT: vroundpd $8, %ymm1, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_roundeven_v8f64_v4f64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vroundpd $8, %ymm0, %ymm0
+; AVX512-NEXT: vroundpd $8, %ymm1, %ymm1
+; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <4 x double> @llvm.roundeven.v4f64(<4 x double> %a0)
+ %v1 = call <4 x double> @llvm.roundeven.v4f64(<4 x double> %a1)
+ %res = shufflevector <4 x double> %v0, <4 x double> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x double> %res
+}
+
+define <16 x float> @concat_roundeven_v16f32_v8f32(<8 x float> %a0, <8 x float> %a1) {
+; SSE-LABEL: concat_roundeven_v16f32_v8f32:
+; SSE: # %bb.0:
+; SSE-NEXT: roundps $8, %xmm0, %xmm0
+; SSE-NEXT: roundps $8, %xmm1, %xmm1
+; SSE-NEXT: roundps $8, %xmm2, %xmm2
+; SSE-NEXT: roundps $8, %xmm3, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: concat_roundeven_v16f32_v8f32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vroundps $8, %ymm0, %ymm0
+; AVX1OR2-NEXT: vroundps $8, %ymm1, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_roundeven_v16f32_v8f32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vroundps $8, %ymm0, %ymm0
+; AVX512-NEXT: vroundps $8, %ymm1, %ymm1
+; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <8 x float> @llvm.roundeven.v8f32(<8 x float> %a0)
+ %v1 = call <8 x float> @llvm.roundeven.v8f32(<8 x float> %a1)
+ %res = shufflevector <8 x float> %v0, <8 x float> %v1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x float> %res
+}
diff --git a/llvm/test/CodeGen/X86/combine-ftrunc.ll b/llvm/test/CodeGen/X86/combine-ftrunc.ll
new file mode 100644
index 0000000000000..a6c703a1cbeae
--- /dev/null
+++ b/llvm/test/CodeGen/X86/combine-ftrunc.ll
@@ -0,0 +1,175 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=sandybridge | FileCheck %s --check-prefixes=AVX,AVX1OR2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX,AVX1OR2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512
+
+define <4 x double> @concat_trunc_v4f64_v2f64(<2 x double> %a0, <2 x double> %a1) {
+; SSE-LABEL: concat_trunc_v4f64_v2f64:
+; SSE: # %bb.0:
+; SSE-NEXT: roundpd $11, %xmm0, %xmm0
+; SSE-NEXT: roundpd $11, %xmm1, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: concat_trunc_v4f64_v2f64:
+; AVX: # %bb.0:
+; AVX-NEXT: vroundpd $11, %xmm0, %xmm0
+; AVX-NEXT: vroundpd $11, %xmm1, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %v0 = call <2 x double> @llvm.trunc.v2f64(<2 x double> %a0)
+ %v1 = call <2 x double> @llvm.trunc.v2f64(<2 x double> %a1)
+ %res = shufflevector <2 x double> %v0, <2 x double> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x double> %res
+}
+
+define <8 x float> @concat_trunc_v8f32_v4f32(<4 x float> %a0, <4 x float> %a1) {
+; SSE-LABEL: concat_trunc_v8f32_v4f32:
+; SSE: # %bb.0:
+; SSE-NEXT: roundps $11, %xmm0, %xmm0
+; SSE-NEXT: roundps $11, %xmm1, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: concat_trunc_v8f32_v4f32:
+; AVX: # %bb.0:
+; AVX-NEXT: vroundps $11, %xmm0, %xmm0
+; AVX-NEXT: vroundps $11, %xmm1, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %v0 = call <4 x float> @llvm.trunc.v4f32(<4 x float> %a0)
+ %v1 = call <4 x float> @llvm.trunc.v4f32(<4 x float> %a1)
+ %res = shufflevector <4 x float> %v0, <4 x float> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x float> %res
+}
+
+define <8 x double> @concat_trunc_v8f64_v2f64(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> %a3) {
+; SSE-LABEL: concat_trunc_v8f64_v2f64:
+; SSE: # %bb.0:
+; SSE-NEXT: roundpd $11, %xmm0, %xmm0
+; SSE-NEXT: roundpd $11, %xmm1, %xmm1
+; SSE-NEXT: roundpd $11, %xmm2, %xmm2
+; SSE-NEXT: roundpd $11, %xmm3, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: concat_trunc_v8f64_v2f64:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vroundpd $11, %xmm0, %xmm0
+; AVX1OR2-NEXT: vroundpd $11, %xmm1, %xmm1
+; AVX1OR2-NEXT: vroundpd $11, %xmm2, %xmm2
+; AVX1OR2-NEXT: vroundpd $11, %xmm3, %xmm3
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_trunc_v8f64_v2f64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vroundpd $11, %xmm0, %xmm0
+; AVX512-NEXT: vroundpd $11, %xmm1, %xmm1
+; AVX512-NEXT: vroundpd $11, %xmm2, %xmm2
+; AVX512-NEXT: vroundpd $11, %xmm3, %xmm3
+; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <2 x double> @llvm.trunc.v2f64(<2 x double> %a0)
+ %v1 = call <2 x double> @llvm.trunc.v2f64(<2 x double> %a1)
+ %v2 = call <2 x double> @llvm.trunc.v2f64(<2 x double> %a2)
+ %v3 = call <2 x double> @llvm.trunc.v2f64(<2 x double> %a3)
+ %r01 = shufflevector <2 x double> %v0, <2 x double> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %r23 = shufflevector <2 x double> %v2, <2 x double> %v3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %res = shufflevector <4 x double> %r01, <4 x double> %r23, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x double> %res
+}
+
+define <16 x float> @concat_trunc_v16f32_v4f32(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> %a3) {
+; SSE-LABEL: concat_trunc_v16f32_v4f32:
+; SSE: # %bb.0:
+; SSE-NEXT: roundps $11, %xmm0, %xmm0
+; SSE-NEXT: roundps $11, %xmm1, %xmm1
+; SSE-NEXT: roundps $11, %xmm2, %xmm2
+; SSE-NEXT: roundps $11, %xmm3, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: concat_trunc_v16f32_v4f32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vroundps $11, %xmm0, %xmm0
+; AVX1OR2-NEXT: vroundps $11, %xmm1, %xmm1
+; AVX1OR2-NEXT: vroundps $11, %xmm2, %xmm2
+; AVX1OR2-NEXT: vroundps $11, %xmm3, %xmm3
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_trunc_v16f32_v4f32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vroundps $11, %xmm0, %xmm0
+; AVX512-NEXT: vroundps $11, %xmm1, %xmm1
+; AVX512-NEXT: vroundps $11, %xmm2, %xmm2
+; AVX512-NEXT: vroundps $11, %xmm3, %xmm3
+; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <4 x float> @llvm.trunc.v4f32(<4 x float> %a0)
+ %v1 = call <4 x float> @llvm.trunc.v4f32(<4 x float> %a1)
+ %v2 = call <4 x float> @llvm.trunc.v4f32(<4 x float> %a2)
+ %v3 = call <4 x float> @llvm.trunc.v4f32(<4 x float> %a3)
+ %r01 = shufflevector <4 x float> %v0, <4 x float> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %r23 = shufflevector <4 x float> %v2, <4 x float> %v3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %res = shufflevector <8 x float> %r01, <8 x float> %r23, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x float> %res
+}
+
+define <8 x double> @concat_trunc_v8f64_v4f64(<4 x double> %a0, <4 x double> %a1) {
+; SSE-LABEL: concat_trunc_v8f64_v4f64:
+; SSE: # %bb.0:
+; SSE-NEXT: roundpd $11, %xmm0, %xmm0
+; SSE-NEXT: roundpd $11, %xmm1, %xmm1
+; SSE-NEXT: roundpd $11, %xmm2, %xmm2
+; SSE-NEXT: roundpd $11, %xmm3, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: concat_trunc_v8f64_v4f64:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vroundpd $11, %ymm0, %ymm0
+; AVX1OR2-NEXT: vroundpd $11, %ymm1, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_trunc_v8f64_v4f64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vroundpd $11, %ymm0, %ymm0
+; AVX512-NEXT: vroundpd $11, %ymm1, %ymm1
+; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <4 x double> @llvm.trunc.v4f64(<4 x double> %a0)
+ %v1 = call <4 x double> @llvm.trunc.v4f64(<4 x double> %a1)
+ %res = shufflevector <4 x double> %v0, <4 x double> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x double> %res
+}
+
+define <16 x float> @concat_trunc_v16f32_v8f32(<8 x float> %a0, <8 x float> %a1) {
+; SSE-LABEL: concat_trunc_v16f32_v8f32:
+; SSE: # %bb.0:
+; SSE-NEXT: roundps $11, %xmm0, %xmm0
+; SSE-NEXT: roundps $11, %xmm1, %xmm1
+; SSE-NEXT: roundps $11, %xmm2, %xmm2
+; SSE-NEXT: roundps $11, %xmm3, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: concat_trunc_v16f32_v8f32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vroundps $11, %ymm0, %ymm0
+; AVX1OR2-NEXT: vroundps $11, %ymm1, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_trunc_v16f32_v8f32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vroundps $11, %ymm0, %ymm0
+; AVX512-NEXT: vroundps $11, %ymm1, %ymm1
+; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <8 x float> @llvm.trunc.v8f32(<8 x float> %a0)
+ %v1 = call <8 x float> @llvm.trunc.v8f32(<8 x float> %a1)
+ %res = shufflevector <8 x float> %v0, <8 x float> %v1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x float> %res
+}
diff --git a/llvm/test/CodeGen/X86/combine-rndscale.ll b/llvm/test/CodeGen/X86/combine-rndscale.ll
new file mode 100644
index 0000000000000..25117e864b512
--- /dev/null
+++ b/llvm/test/CodeGen/X86/combine-rndscale.ll
@@ -0,0 +1,144 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=sandybridge | FileCheck %s --check-prefixes=AVX,AVX1OR2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX,AVX1OR2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512
+
+define <4 x double> @concat_roundpd_v4f64_v2f64(<2 x double> %a0, <2 x double> %a1) {
+; AVX-LABEL: concat_roundpd_v4f64_v2f64:
+; AVX: # %bb.0:
+; AVX-NEXT: vroundpd $4, %xmm0, %xmm0
+; AVX-NEXT: vroundpd $4, %xmm1, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %v0 = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a0, i32 4)
+ %v1 = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a1, i32 4)
+ %res = shufflevector <2 x double> %v0, <2 x double> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x double> %res
+}
+
+define <8 x float> @concat_roundps_v8f32_v4f32(<4 x float> %a0, <4 x float> %a1) {
+; AVX-LABEL: concat_roundps_v8f32_v4f32:
+; AVX: # %bb.0:
+; AVX-NEXT: vroundps $4, %xmm0, %xmm0
+; AVX-NEXT: vroundps $4, %xmm1, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %v0 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 4)
+ %v1 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a1, i32 4)
+ %res = shufflevector <4 x float> %v0, <4 x float> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x float> %res
+}
+
+define <8 x double> @concat_roundpd_v8f64_v2f64(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> %a3) {
+; AVX1OR2-LABEL: concat_roundpd_v8f64_v2f64:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vroundpd $4, %xmm0, %xmm0
+; AVX1OR2-NEXT: vroundpd $4, %xmm1, %xmm1
+; AVX1OR2-NEXT: vroundpd $4, %xmm2, %xmm2
+; AVX1OR2-NEXT: vroundpd $4, %xmm3, %xmm3
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_roundpd_v8f64_v2f64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vroundpd $4, %xmm0, %xmm0
+; AVX512-NEXT: vroundpd $4, %xmm1, %xmm1
+; AVX512-NEXT: vroundpd $4, %xmm2, %xmm2
+; AVX512-NEXT: vroundpd $4, %xmm3, %xmm3
+; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a0, i32 4)
+ %v1 = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a1, i32 4)
+ %v2 = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a2, i32 4)
+ %v3 = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a3, i32 4)
+ %r01 = shufflevector <2 x double> %v0, <2 x double> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %r23 = shufflevector <2 x double> %v2, <2 x double> %v3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %res = shufflevector <4 x double> %r01, <4 x double> %r23, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x double> %res
+}
+
+define <16 x float> @concat_roundps_v16f32_v4f32(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> %a3) {
+; AVX1OR2-LABEL: concat_roundps_v16f32_v4f32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vroundps $4, %xmm0, %xmm0
+; AVX1OR2-NEXT: vroundps $4, %xmm1, %xmm1
+; AVX1OR2-NEXT: vroundps $4, %xmm2, %xmm2
+; AVX1OR2-NEXT: vroundps $4, %xmm3, %xmm3
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_roundps_v16f32_v4f32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vroundps $4, %xmm0, %xmm0
+; AVX512-NEXT: vroundps $4, %xmm1, %xmm1
+; AVX512-NEXT: vroundps $4, %xmm2, %xmm2
+; AVX512-NEXT: vroundps $4, %xmm3, %xmm3
+; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 4)
+ %v1 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a1, i32 4)
+ %v2 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a2, i32 4)
+ %v3 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a3, i32 4)
+ %r01 = shufflevector <4 x float> %v0, <4 x float> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %r23 = shufflevector <4 x float> %v2, <4 x float> %v3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %res = shufflevector <8 x float> %r01, <8 x float> %r23, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x float> %res
+}
+
+define <8 x double> @concat_roundpd_v8f64_v4f64(<4 x double> %a0, <4 x double> %a1) {
+; AVX1OR2-LABEL: concat_roundpd_v8f64_v4f64:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vroundpd $4, %ymm0, %ymm0
+; AVX1OR2-NEXT: vroundpd $4, %ymm1, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_roundpd_v8f64_v4f64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vroundpd $4, %ymm0, %ymm0
+; AVX512-NEXT: vroundpd $4, %ymm1, %ymm1
+; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a0, i32 4)
+ %v1 = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a1, i32 4)
+ %res = shufflevector <4 x double> %v0, <4 x double> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x double> %res
+}
+
+define <16 x float> @concat_roundps_v16f32_v8f32(<8 x float> %a0, <8 x float> %a1) {
+; AVX1OR2-LABEL: concat_roundps_v16f32_v8f32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vroundps $4, %ymm0, %ymm0
+; AVX1OR2-NEXT: vroundps $4, %ymm1, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_roundps_v16f32_v8f32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vroundps $4, %ymm0, %ymm0
+; AVX512-NEXT: vroundps $4, %ymm1, %ymm1
+; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %v0 = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a0, i32 4)
+ %v1 = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a1, i32 4)
+ %res = shufflevector <8 x float> %v0, <8 x float> %v1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x float> %res
+}
+
+; negative test - rounding mode mismatch
+define <8 x float> @concat_roundps_v8f32_v4f32_mismatch(<4 x float> %a0, <4 x float> %a1) {
+; AVX-LABEL: concat_roundps_v8f32_v4f32_mismatch:
+; AVX: # %bb.0:
+; AVX-NEXT: vroundps $0, %xmm0, %xmm0
+; AVX-NEXT: vroundps $4, %xmm1, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %v0 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 0)
+ %v1 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a1, i32 4)
+ %res = shufflevector <4 x float> %v0, <4 x float> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x float> %res
+}
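When the immediates match, the x86-specific round intrinsics admit the same rewrite as the generic intrinsics sketched above, e.g. concat_roundpd_v4f64_v2f64 could fold to (again an assumption; the assertions show the unfolded form):

  %c = shufflevector <2 x double> %a0, <2 x double> %a1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %r = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %c, i32 4)

The final mismatch test cannot be rewritten this way: a single round call carries one rounding-control immediate for every lane, and no immediate applies $0 (round to nearest) to the low half and $4 (current MXCSR rounding mode) to the high half, so any concat combine has to verify that all operands use an identical control before folding.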