[llvm] [X86] Add tests showing failure to concat matching SITOFP/UITOFP vector ops (PR #172852)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Dec 18 05:56:41 PST 2025
llvmbot wrote:
@llvm/pr-subscribers-backend-x86
Author: Simon Pilgrim (RKSimon)
<details>
<summary>Changes</summary>
The tests have to perform an additional FADD to prevent combineConcatVectorOfCasts from performing the fold; we're trying to show where the concatenation fails to occur during a combineConcatVectorOps recursion.
Interestingly, due to the uitofp expansion, AVX1/2 often manages to concat where AVX512 can't.
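For reference, each test follows the same shape; this is the body of the first sitofp case from the diff below, where the FADDs are the only thing keeping the existing cast-concat fold from firing before the shuffle:

```llvm
%c0 = sitofp <2 x i32> %a0 to <2 x double>
%c1 = sitofp <2 x i32> %a1 to <2 x double>
%v0 = fadd <2 x double> %b0, %c0   ; extra FADDs block combineConcatVectorOfCasts
%v1 = fadd <2 x double> %b1, %c1
%res = shufflevector <2 x double> %v0, <2 x double> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
```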
---
Patch is 45.19 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/172852.diff
2 Files Affected:
- (added) llvm/test/CodeGen/X86/combine-sitofp.ll (+245)
- (added) llvm/test/CodeGen/X86/combine-uitofp.ll (+681)
``````````diff
diff --git a/llvm/test/CodeGen/X86/combine-sitofp.ll b/llvm/test/CodeGen/X86/combine-sitofp.ll
new file mode 100644
index 0000000000000..4a9412d920b58
--- /dev/null
+++ b/llvm/test/CodeGen/X86/combine-sitofp.ll
@@ -0,0 +1,245 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=sandybridge | FileCheck %s --check-prefixes=AVX,AVX1OR2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX,AVX1OR2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512
+
+define <4 x double> @concat_sitofp_v4f64_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x double> %b0, <2 x double> %b1) {
+; SSE-LABEL: concat_sitofp_v4f64_v2i32:
+; SSE: # %bb.0:
+; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
+; SSE-NEXT: cvtdq2pd %xmm1, %xmm1
+; SSE-NEXT: addpd %xmm2, %xmm0
+; SSE-NEXT: addpd %xmm3, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: concat_sitofp_v4f64_v2i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
+; AVX-NEXT: vcvtdq2pd %xmm1, %xmm1
+; AVX-NEXT: vaddpd %xmm0, %xmm2, %xmm0
+; AVX-NEXT: vaddpd %xmm1, %xmm3, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %c0 = sitofp <2 x i32> %a0 to <2 x double>
+ %c1 = sitofp <2 x i32> %a1 to <2 x double>
+ %v0 = fadd <2 x double> %b0, %c0
+ %v1 = fadd <2 x double> %b1, %c1
+ %res = shufflevector <2 x double> %v0, <2 x double> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x double> %res
+}
+
+define <8 x float> @concat_sitofp_v8f32_v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x float> %b0, <4 x float> %b1) {
+; SSE-LABEL: concat_sitofp_v8f32_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
+; SSE-NEXT: cvtdq2ps %xmm1, %xmm1
+; SSE-NEXT: addps %xmm2, %xmm0
+; SSE-NEXT: addps %xmm3, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: concat_sitofp_v8f32_v4i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
+; AVX-NEXT: vcvtdq2ps %xmm1, %xmm1
+; AVX-NEXT: vaddps %xmm0, %xmm2, %xmm0
+; AVX-NEXT: vaddps %xmm1, %xmm3, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %c0 = sitofp <4 x i32> %a0 to <4 x float>
+ %c1 = sitofp <4 x i32> %a1 to <4 x float>
+ %v0 = fadd <4 x float> %b0, %c0
+ %v1 = fadd <4 x float> %b1, %c1
+ %res = shufflevector <4 x float> %v0, <4 x float> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x float> %res
+}
+
+define <8 x double> @concat_sitofp_v8f64_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32> %a2, <2 x i32> %a3, <2 x double> %b0, <2 x double> %b1, <2 x double> %b2, <2 x double> %b3) {
+; SSE-LABEL: concat_sitofp_v8f64_v2i32:
+; SSE: # %bb.0:
+; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
+; SSE-NEXT: cvtdq2pd %xmm1, %xmm1
+; SSE-NEXT: cvtdq2pd %xmm2, %xmm2
+; SSE-NEXT: cvtdq2pd %xmm3, %xmm3
+; SSE-NEXT: addpd %xmm4, %xmm0
+; SSE-NEXT: addpd %xmm5, %xmm1
+; SSE-NEXT: addpd %xmm6, %xmm2
+; SSE-NEXT: addpd %xmm7, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: concat_sitofp_v8f64_v2i32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vcvtdq2pd %xmm0, %xmm0
+; AVX1OR2-NEXT: vcvtdq2pd %xmm1, %xmm1
+; AVX1OR2-NEXT: vcvtdq2pd %xmm2, %xmm2
+; AVX1OR2-NEXT: vcvtdq2pd %xmm3, %xmm3
+; AVX1OR2-NEXT: vaddpd %xmm0, %xmm4, %xmm0
+; AVX1OR2-NEXT: vaddpd %xmm1, %xmm5, %xmm1
+; AVX1OR2-NEXT: vaddpd %xmm2, %xmm6, %xmm2
+; AVX1OR2-NEXT: vaddpd %xmm3, %xmm7, %xmm3
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_sitofp_v8f64_v2i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vcvtdq2pd %xmm0, %xmm0
+; AVX512-NEXT: vcvtdq2pd %xmm1, %xmm1
+; AVX512-NEXT: vcvtdq2pd %xmm2, %xmm2
+; AVX512-NEXT: vcvtdq2pd %xmm3, %xmm3
+; AVX512-NEXT: vaddpd %xmm0, %xmm4, %xmm0
+; AVX512-NEXT: vaddpd %xmm1, %xmm5, %xmm1
+; AVX512-NEXT: vaddpd %xmm2, %xmm6, %xmm2
+; AVX512-NEXT: vaddpd %xmm3, %xmm7, %xmm3
+; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %c0 = sitofp <2 x i32> %a0 to <2 x double>
+ %c1 = sitofp <2 x i32> %a1 to <2 x double>
+ %c2 = sitofp <2 x i32> %a2 to <2 x double>
+ %c3 = sitofp <2 x i32> %a3 to <2 x double>
+ %v0 = fadd <2 x double> %b0, %c0
+ %v1 = fadd <2 x double> %b1, %c1
+ %v2 = fadd <2 x double> %b2, %c2
+ %v3 = fadd <2 x double> %b3, %c3
+ %r01 = shufflevector <2 x double> %v0, <2 x double> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %r23 = shufflevector <2 x double> %v2, <2 x double> %v3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %res = shufflevector <4 x double> %r01, <4 x double> %r23, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x double> %res
+}
+
+define <16 x float> @concat_sitofp_v16f32_v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2, <4 x i32> %a3, <4 x float> %b0, <4 x float> %b1, <4 x float> %b2, <4 x float> %b3) {
+; SSE-LABEL: concat_sitofp_v16f32_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
+; SSE-NEXT: cvtdq2ps %xmm1, %xmm1
+; SSE-NEXT: cvtdq2ps %xmm2, %xmm2
+; SSE-NEXT: cvtdq2ps %xmm3, %xmm3
+; SSE-NEXT: addps %xmm4, %xmm0
+; SSE-NEXT: addps %xmm5, %xmm1
+; SSE-NEXT: addps %xmm6, %xmm2
+; SSE-NEXT: addps %xmm7, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: concat_sitofp_v16f32_v4i32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vcvtdq2ps %xmm0, %xmm0
+; AVX1OR2-NEXT: vcvtdq2ps %xmm1, %xmm1
+; AVX1OR2-NEXT: vcvtdq2ps %xmm2, %xmm2
+; AVX1OR2-NEXT: vcvtdq2ps %xmm3, %xmm3
+; AVX1OR2-NEXT: vaddps %xmm0, %xmm4, %xmm0
+; AVX1OR2-NEXT: vaddps %xmm1, %xmm5, %xmm1
+; AVX1OR2-NEXT: vaddps %xmm2, %xmm6, %xmm2
+; AVX1OR2-NEXT: vaddps %xmm3, %xmm7, %xmm3
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_sitofp_v16f32_v4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vcvtdq2ps %xmm0, %xmm0
+; AVX512-NEXT: vcvtdq2ps %xmm1, %xmm1
+; AVX512-NEXT: vcvtdq2ps %xmm2, %xmm2
+; AVX512-NEXT: vcvtdq2ps %xmm3, %xmm3
+; AVX512-NEXT: vaddps %xmm0, %xmm4, %xmm0
+; AVX512-NEXT: vaddps %xmm1, %xmm5, %xmm1
+; AVX512-NEXT: vaddps %xmm2, %xmm6, %xmm2
+; AVX512-NEXT: vaddps %xmm3, %xmm7, %xmm3
+; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %c0 = sitofp <4 x i32> %a0 to <4 x float>
+ %c1 = sitofp <4 x i32> %a1 to <4 x float>
+ %c2 = sitofp <4 x i32> %a2 to <4 x float>
+ %c3 = sitofp <4 x i32> %a3 to <4 x float>
+ %v0 = fadd <4 x float> %b0, %c0
+ %v1 = fadd <4 x float> %b1, %c1
+ %v2 = fadd <4 x float> %b2, %c2
+ %v3 = fadd <4 x float> %b3, %c3
+ %r01 = shufflevector <4 x float> %v0, <4 x float> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %r23 = shufflevector <4 x float> %v2, <4 x float> %v3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %res = shufflevector <8 x float> %r01, <8 x float> %r23, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x float> %res
+}
+
+define <8 x double> @concat_sitofp_v8f64_v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x double> %b0, <4 x double> %b1) {
+; SSE-LABEL: concat_sitofp_v8f64_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm0[2,3,2,3]
+; SSE-NEXT: cvtdq2pd %xmm6, %xmm6
+; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm1[2,3,2,3]
+; SSE-NEXT: cvtdq2pd %xmm7, %xmm7
+; SSE-NEXT: cvtdq2pd %xmm1, %xmm8
+; SSE-NEXT: addpd %xmm2, %xmm0
+; SSE-NEXT: addpd %xmm3, %xmm6
+; SSE-NEXT: addpd %xmm4, %xmm8
+; SSE-NEXT: addpd %xmm5, %xmm7
+; SSE-NEXT: movapd %xmm6, %xmm1
+; SSE-NEXT: movapd %xmm8, %xmm2
+; SSE-NEXT: movapd %xmm7, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: concat_sitofp_v8f64_v4i32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vcvtdq2pd %xmm0, %ymm0
+; AVX1OR2-NEXT: vcvtdq2pd %xmm1, %ymm1
+; AVX1OR2-NEXT: vaddpd %ymm0, %ymm2, %ymm0
+; AVX1OR2-NEXT: vaddpd %ymm1, %ymm3, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_sitofp_v8f64_v4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vcvtdq2pd %xmm0, %ymm0
+; AVX512-NEXT: vcvtdq2pd %xmm1, %ymm1
+; AVX512-NEXT: vaddpd %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: vaddpd %ymm1, %ymm3, %ymm1
+; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %c0 = sitofp <4 x i32> %a0 to <4 x double>
+ %c1 = sitofp <4 x i32> %a1 to <4 x double>
+ %v0 = fadd <4 x double> %b0, %c0
+ %v1 = fadd <4 x double> %b1, %c1
+ %res = shufflevector <4 x double> %v0, <4 x double> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x double> %res
+}
+
+define <16 x float> @concat_sitofp_v16f32_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x float> %b0, <8 x float> %b1) {
+; SSE-LABEL: concat_sitofp_v16f32_v8i32:
+; SSE: # %bb.0:
+; SSE-NEXT: cvtdq2ps %xmm1, %xmm1
+; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
+; SSE-NEXT: cvtdq2ps %xmm3, %xmm3
+; SSE-NEXT: cvtdq2ps %xmm2, %xmm2
+; SSE-NEXT: addps %xmm4, %xmm0
+; SSE-NEXT: addps %xmm5, %xmm1
+; SSE-NEXT: addps %xmm6, %xmm2
+; SSE-NEXT: addps %xmm7, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: concat_sitofp_v16f32_v8i32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX1OR2-NEXT: vcvtdq2ps %ymm1, %ymm1
+; AVX1OR2-NEXT: vaddps %ymm0, %ymm2, %ymm0
+; AVX1OR2-NEXT: vaddps %ymm1, %ymm3, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: concat_sitofp_v16f32_v8i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX512-NEXT: vcvtdq2ps %ymm1, %ymm1
+; AVX512-NEXT: vaddps %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: vaddps %ymm1, %ymm3, %ymm1
+; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %c0 = sitofp <8 x i32> %a0 to <8 x float>
+ %c1 = sitofp <8 x i32> %a1 to <8 x float>
+ %v0 = fadd <8 x float> %b0, %c0
+ %v1 = fadd <8 x float> %b1, %c1
+ %res = shufflevector <8 x float> %v0, <8 x float> %v1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x float> %res
+}
diff --git a/llvm/test/CodeGen/X86/combine-uitofp.ll b/llvm/test/CodeGen/X86/combine-uitofp.ll
new file mode 100644
index 0000000000000..a3642f99ff83a
--- /dev/null
+++ b/llvm/test/CodeGen/X86/combine-uitofp.ll
@@ -0,0 +1,681 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefixes=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=SSE4
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=sandybridge | FileCheck %s --check-prefixes=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX512
+
+define <4 x double> @concat_uitofp_v4f64_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x double> %b0, <2 x double> %b1) {
+; SSE2-LABEL: concat_uitofp_v4f64_v2i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: xorpd %xmm4, %xmm4
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSE2-NEXT: movapd {{.*#+}} xmm5 = [4.503599627370496E+15,4.503599627370496E+15]
+; SSE2-NEXT: orpd %xmm5, %xmm0
+; SSE2-NEXT: subpd %xmm5, %xmm0
+; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
+; SSE2-NEXT: orpd %xmm5, %xmm1
+; SSE2-NEXT: subpd %xmm5, %xmm1
+; SSE2-NEXT: addpd %xmm2, %xmm0
+; SSE2-NEXT: addpd %xmm3, %xmm1
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: concat_uitofp_v4f64_v2i32:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; SSE4-NEXT: movdqa {{.*#+}} xmm4 = [4.503599627370496E+15,4.503599627370496E+15]
+; SSE4-NEXT: por %xmm4, %xmm0
+; SSE4-NEXT: subpd %xmm4, %xmm0
+; SSE4-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; SSE4-NEXT: por %xmm4, %xmm1
+; SSE4-NEXT: subpd %xmm4, %xmm1
+; SSE4-NEXT: addpd %xmm2, %xmm0
+; SSE4-NEXT: addpd %xmm3, %xmm1
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: concat_uitofp_v4f64_v2i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vorpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: vaddpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: vaddpd %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: concat_uitofp_v4f64_v2i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2
+; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4841369599423283200,4841369599423283200,4841369599423283200,4841369599423283200]
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm1 = [-4.503599627370496E+15,-4.503599627370496E+15,-4.503599627370496E+15,-4.503599627370496E+15]
+; AVX2-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vaddpd %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: concat_uitofp_v4f64_v2i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vcvtudq2pd %xmm0, %xmm0
+; AVX512-NEXT: vcvtudq2pd %xmm1, %xmm1
+; AVX512-NEXT: vaddpd %xmm0, %xmm2, %xmm0
+; AVX512-NEXT: vaddpd %xmm1, %xmm3, %xmm1
+; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
+ %c0 = uitofp <2 x i32> %a0 to <2 x double>
+ %c1 = uitofp <2 x i32> %a1 to <2 x double>
+ %v0 = fadd <2 x double> %b0, %c0
+ %v1 = fadd <2 x double> %b1, %c1
+ %res = shufflevector <2 x double> %v0, <2 x double> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x double> %res
+}
+
+define <8 x float> @concat_uitofp_v8f32_v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x float> %b0, <4 x float> %b1) {
+; SSE2-LABEL: concat_uitofp_v8f32_v4i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,65535]
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: pand %xmm4, %xmm5
+; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [1258291200,1258291200,1258291200,1258291200]
+; SSE2-NEXT: por %xmm6, %xmm5
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [1392508928,1392508928,1392508928,1392508928]
+; SSE2-NEXT: por %xmm7, %xmm0
+; SSE2-NEXT: movaps {{.*#+}} xmm8 = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11]
+; SSE2-NEXT: subps %xmm8, %xmm0
+; SSE2-NEXT: addps %xmm5, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm4
+; SSE2-NEXT: por %xmm6, %xmm4
+; SSE2-NEXT: psrld $16, %xmm1
+; SSE2-NEXT: por %xmm7, %xmm1
+; SSE2-NEXT: subps %xmm8, %xmm1
+; SSE2-NEXT: addps %xmm4, %xmm1
+; SSE2-NEXT: addps %xmm2, %xmm0
+; SSE2-NEXT: addps %xmm3, %xmm1
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: concat_uitofp_v8f32_v4i32:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa {{.*#+}} xmm4 = [1258291200,1258291200,1258291200,1258291200]
+; SSE4-NEXT: movdqa %xmm0, %xmm5
+; SSE4-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0],xmm4[1],xmm5[2],xmm4[3],xmm5[4],xmm4[5],xmm5[6],xmm4[7]
+; SSE4-NEXT: psrld $16, %xmm0
+; SSE4-NEXT: movdqa {{.*#+}} xmm6 = [1392508928,1392508928,1392508928,1392508928]
+; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm6[1],xmm0[2],xmm6[3],xmm0[4],xmm6[5],xmm0[6],xmm6[7]
+; SSE4-NEXT: movaps {{.*#+}} xmm7 = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11]
+; SSE4-NEXT: subps %xmm7, %xmm0
+; SSE4-NEXT: addps %xmm5, %xmm0
+; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
+; SSE4-NEXT: psrld $16, %xmm1
+; SSE4-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm6[1],xmm1[2],xmm6[3],xmm1[4],xmm6[5],xmm1[6],xmm6[7]
+; SSE4-NEXT: subps %xmm7, %xmm1
+; SSE4-NEXT: addps %xmm4, %xmm1
+; SSE4-NEXT: addps %xmm2, %xmm0
+; SSE4-NEXT: addps %xmm3, %xmm1
+; SSE4-NEXT: retq
+;
+; AVX1-LABEL: concat_uitofp_v8f32_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm4
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm5 = [1392508928,1392508928,1392508928,1392508928]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1],xmm4[2],xmm5[3],xmm4[4],xmm5[5],xmm4[6],xmm5[7]
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [1258291200,1258291200,1258291200,1258291200]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm6[1],xmm0[2],xmm6[3],xmm0[4],xmm6[5],xmm0[6],xmm6[7]
+; AVX1-NEXT: vpsrld $16, %xmm1, %xmm7
+; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0],xmm5[1],xmm7[2],xmm5[3],xmm7[4],xmm5[5],xmm7[6],xmm5[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm6[1],xmm1[2],xmm6[3],xmm1[4],xmm6[5],xmm1[6],xmm6[7]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm4, %ymm1
+; AVX1-NEXT: vsubps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX1-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1
+; AVX1-NEXT: vaddps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: concat_uitofp_v8f32_v4i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $16, %ymm0, %ymm1
+; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],mem[1],ymm1[2],mem[3],ymm1[4],mem[5],ymm1[6],mem[7],ymm1[8],mem[9],ymm1[10],mem[11],ymm1[12],mem[13],ymm1[14],mem[15]
+; AVX2-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2
+; AVX2-NEXT: vbroadcastss {{.*#+}} ymm4 = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],mem[1],ymm0[2],mem[3],ymm0[4],mem[5],ymm0[6],mem[7],ymm0[8],mem[9],ymm0[10],mem[11],ymm0[12],mem[13],ymm0[14],mem[15]
+; AVX2-NEXT: vsubps %ymm4, %ymm1, %ymm1
+; AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1
+; AVX2-NEXT: vaddps %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: concat_uitofp_v8f32_v4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vcvtudq2ps %xmm0, %xmm0
+; AVX512-NEXT: vcvtudq2ps %xmm1, %xmm1
+; AVX512-NEXT: vaddps %xmm0, %xmm2, %xmm0
+; AVX512-NEXT: vaddps %xmm1, %xmm3, %xmm1
+; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
+ %c0 = uitofp <4 x i32> %a0 to <4 x float>
+ %c1 = uitofp <4 x i32> %a1 to <4 x float>
+ %v0 = fadd <4 x float> %b0, %c0
+ %v1 = fadd <4 x float> %b1, %c1
+ %res = shufflevector <4 x float> %v0, <4 x float> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x float> %res
+}
+
+define <8 x double> @concat_uitofp_v8f64_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32> %a2, <2 x i32> %a3, <2 x double> %b0, <2 x double> %b1, <2 x double> %b2, <2 x double> %b3) {
+; SSE2-LABEL: concat_uitofp_v8f64_v2i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: xorpd %xmm8, %xmm8
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
+; SSE2-NEXT: movapd {{.*#+}} xmm9 = [4.503599627370496E+15,4.503599627370496E+15]
+; SSE2-NEXT: orpd %xmm9, %xmm0
+; SSE2-NEXT: subpd %xmm9, %xmm0
+; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1]
+; SSE2-NEXT: orpd %xmm9, %xmm1
+; SSE2-NEXT: subpd %xmm9, %xmm1
+; SSE2-NEXT: unpcklps {{.*...
[truncated]
``````````
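For context on the uitofp expansion mentioned above: targets without AVX512's vcvtudq2pd lower the u32 -> f64 conversion via the usual 2^52 bias trick, which is where the 4.503599627370496E+15 / 4841369599423283200 constants in the SSE2/AVX1/AVX2 output come from. A minimal scalar sketch of that expansion, purely for illustration (the function name is made up):

```llvm
define double @u32_to_f64_bias_sketch(i32 %x) {
  ; Place the unsigned value in the low 32 bits of the bit pattern of 2^52.
  %wide   = zext i32 %x to i64
  %biased = or i64 %wide, 4841369599423283200   ; 0x4330000000000000 (bits of 2^52)
  %asfp   = bitcast i64 %biased to double       ; now equals 2^52 + %x
  ; Subtract 2^52 to recover the exact unsigned value as a double.
  %res    = fsub double %asfp, 0x4330000000000000
  ret double %res
}
```

In the AVX1/AVX2 output above you can see the expansion's OR/SUB/ADD nodes being concatenated to 256 bits, while the AVX512 path keeps per-128-bit vcvtudq2pd conversions followed by separate inserts.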
</details>
https://github.com/llvm/llvm-project/pull/172852