[llvm] e933c05 - [X86] Add fadd/fsub/fmul tests showing failure to concat operands together and perform the op as a wider vector

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Tue Mar 26 08:20:38 PDT 2024


Author: Simon Pilgrim
Date: 2024-03-26T15:03:41Z
New Revision: e933c05cd2cb97974cfca9544ebbdb9af349d602

URL: https://github.com/llvm/llvm-project/commit/e933c05cd2cb97974cfca9544ebbdb9af349d602
DIFF: https://github.com/llvm/llvm-project/commit/e933c05cd2cb97974cfca9544ebbdb9af349d602.diff

LOG: [X86] Add fadd/fsub/fmul tests showing failure to concat operands together and perform the op as a wider vector

We don't want to concat fadd/fsub/fmul nodes if both operands would need concatenating (the fp op is usually cheaper than the concat), but if at least one operand is free to concat (e.g. a constant, or a subvector extracted from a wider vector), then we should concat the operands and perform the fp op at the wider width.
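
For illustration only (this commit just adds the tests; the combine itself is not implemented here), the fold these tests are aiming for looks like the following sketch, based on the new widen_fadd_v4f32_v8f32_const test. Since fadd is elementwise and a splat constant concatenates for free, the two v4f32 ops followed by a concat of the results can be rewritten as one concat of the non-constant operands followed by a single v8f32 op (the function name below is illustrative, not part of the commit):

    define <8 x float> @widen_fadd_v4f32_v8f32_const_widened(<4 x float> %x, <4 x float> %y) {
      ; Concat the two non-free operands first...
      %xy = shufflevector <4 x float> %x, <4 x float> %y, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
      ; ...then perform one wide fadd against the freely-widened splat constant.
      %r = fadd <8 x float> %xy, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
      ret <8 x float> %r
    }

On AVX this should reduce to a single vinsertf128 followed by one ymm vaddps, rather than the two xmm vaddps plus vinsertf128 currently shown in the checks below.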

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/widen_fadd.ll
    llvm/test/CodeGen/X86/widen_fmul.ll
    llvm/test/CodeGen/X86/widen_fsub.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/widen_fadd.ll b/llvm/test/CodeGen/X86/widen_fadd.ll
index 68f2ed4368044c..8277d532bfce29 100644
--- a/llvm/test/CodeGen/X86/widen_fadd.ll
+++ b/llvm/test/CodeGen/X86/widen_fadd.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=SSE
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx  | FileCheck %s --check-prefixes=AVX,AVX1OR2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512F
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx  | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512,AVX512VL
 
 define void @widen_fadd_v2f32_v4f32(ptr %a0, ptr %b0, ptr %c0) {
 ; SSE-LABEL: widen_fadd_v2f32_v4f32:
@@ -364,3 +364,69 @@ define void @widen_fadd_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
   store <2 x float> %vc14, ptr %c14, align 4
   ret void
 }
+
+define <8 x float> @widen_fadd_v4f32_v8f32_const(<4 x float> %x, <4 x float> %y) {
+; SSE-LABEL: widen_fadd_v4f32_v8f32_const:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps {{.*#+}} xmm2 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; SSE-NEXT:    addps %xmm2, %xmm0
+; SSE-NEXT:    addps %xmm2, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: widen_fadd_v4f32_v8f32_const:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vbroadcastss {{.*#+}} xmm2 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; AVX-NEXT:    vaddps %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vaddps %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
+  %x2 = fadd <4 x float> %x, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %y2 = fadd <4 x float> %y, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %r = shufflevector <4 x float> %x2, <4 x float> %y2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x float> %r
+}
+
+define <16 x float> @widen_fadd_v4f32_v16f32_const(<4 x float> %x, <4 x float> %y, <4 x float> %z, <4 x float> %w) {
+; SSE-LABEL: widen_fadd_v4f32_v16f32_const:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps {{.*#+}} xmm4 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; SSE-NEXT:    addps %xmm4, %xmm0
+; SSE-NEXT:    addps %xmm4, %xmm1
+; SSE-NEXT:    addps %xmm4, %xmm2
+; SSE-NEXT:    addps %xmm4, %xmm3
+; SSE-NEXT:    retq
+;
+; AVX1OR2-LABEL: widen_fadd_v4f32_v16f32_const:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vbroadcastss {{.*#+}} xmm4 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; AVX1OR2-NEXT:    vaddps %xmm4, %xmm0, %xmm0
+; AVX1OR2-NEXT:    vaddps %xmm4, %xmm1, %xmm1
+; AVX1OR2-NEXT:    vaddps %xmm4, %xmm2, %xmm2
+; AVX1OR2-NEXT:    vaddps %xmm4, %xmm3, %xmm3
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm1
+; AVX1OR2-NEXT:    retq
+;
+; AVX512-LABEL: widen_fadd_v4f32_v16f32_const:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vbroadcastss {{.*#+}} xmm4 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; AVX512-NEXT:    vaddps %xmm4, %xmm0, %xmm0
+; AVX512-NEXT:    vaddps %xmm4, %xmm1, %xmm1
+; AVX512-NEXT:    vaddps %xmm4, %xmm2, %xmm2
+; AVX512-NEXT:    vaddps %xmm4, %xmm3, %xmm3
+; AVX512-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %x2 = fadd <4 x float> %x, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %y2 = fadd <4 x float> %y, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %z2 = fadd <4 x float> %z, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %w2 = fadd <4 x float> %w, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %r0 = shufflevector <4 x float> %x2, <4 x float> %y2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %r1 = shufflevector <4 x float> %z2, <4 x float> %w2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %r = shufflevector <8 x float> %r0, <8 x float> %r1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  ret <16 x float> %r
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; AVX1: {{.*}}
+; AVX2: {{.*}}

diff --git a/llvm/test/CodeGen/X86/widen_fmul.ll b/llvm/test/CodeGen/X86/widen_fmul.ll
index ac208da9ee11a7..3adc5b48539bbf 100644
--- a/llvm/test/CodeGen/X86/widen_fmul.ll
+++ b/llvm/test/CodeGen/X86/widen_fmul.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=SSE
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx  | FileCheck %s --check-prefixes=AVX,AVX1OR2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512F
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx  | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512,AVX512VL
 
 define void @widen_fmul_v2f32_v4f32(ptr %a0, ptr %b0, ptr %c0) {
 ; SSE-LABEL: widen_fmul_v2f32_v4f32:
@@ -364,3 +364,69 @@ define void @widen_fmul_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
   store <2 x float> %vc14, ptr %c14, align 4
   ret void
 }
+
+define <8 x float> @widen_fmul_v4f32_v8f32_const(<4 x float> %x, <4 x float> %y) {
+; SSE-LABEL: widen_fmul_v4f32_v8f32_const:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps {{.*#+}} xmm2 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0]
+; SSE-NEXT:    mulps %xmm2, %xmm0
+; SSE-NEXT:    mulps %xmm2, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: widen_fmul_v4f32_v8f32_const:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vbroadcastss {{.*#+}} xmm2 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0]
+; AVX-NEXT:    vmulps %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vmulps %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
+  %x2 = fmul <4 x float> %x, <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>
+  %y2 = fmul <4 x float> %y, <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>
+  %r = shufflevector <4 x float> %x2, <4 x float> %y2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x float> %r
+}
+
+define <16 x float> @widen_fmul_v4f32_v16f32_const(<4 x float> %x, <4 x float> %y, <4 x float> %z, <4 x float> %w) {
+; SSE-LABEL: widen_fmul_v4f32_v16f32_const:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps {{.*#+}} xmm4 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0]
+; SSE-NEXT:    mulps %xmm4, %xmm0
+; SSE-NEXT:    mulps %xmm4, %xmm1
+; SSE-NEXT:    mulps %xmm4, %xmm2
+; SSE-NEXT:    mulps %xmm4, %xmm3
+; SSE-NEXT:    retq
+;
+; AVX1OR2-LABEL: widen_fmul_v4f32_v16f32_const:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vbroadcastss {{.*#+}} xmm4 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0]
+; AVX1OR2-NEXT:    vmulps %xmm4, %xmm0, %xmm0
+; AVX1OR2-NEXT:    vmulps %xmm4, %xmm1, %xmm1
+; AVX1OR2-NEXT:    vmulps %xmm4, %xmm2, %xmm2
+; AVX1OR2-NEXT:    vmulps %xmm4, %xmm3, %xmm3
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm1
+; AVX1OR2-NEXT:    retq
+;
+; AVX512-LABEL: widen_fmul_v4f32_v16f32_const:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vbroadcastss {{.*#+}} xmm4 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0]
+; AVX512-NEXT:    vmulps %xmm4, %xmm0, %xmm0
+; AVX512-NEXT:    vmulps %xmm4, %xmm1, %xmm1
+; AVX512-NEXT:    vmulps %xmm4, %xmm2, %xmm2
+; AVX512-NEXT:    vmulps %xmm4, %xmm3, %xmm3
+; AVX512-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %x2 = fmul <4 x float> %x, <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>
+  %y2 = fmul <4 x float> %y, <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>
+  %z2 = fmul <4 x float> %z, <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>
+  %w2 = fmul <4 x float> %w, <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>
+  %r0 = shufflevector <4 x float> %x2, <4 x float> %y2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %r1 = shufflevector <4 x float> %z2, <4 x float> %w2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %r = shufflevector <8 x float> %r0, <8 x float> %r1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  ret <16 x float> %r
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; AVX1: {{.*}}
+; AVX2: {{.*}}

diff --git a/llvm/test/CodeGen/X86/widen_fsub.ll b/llvm/test/CodeGen/X86/widen_fsub.ll
index 90cf455ba61fc6..eb318181c69860 100644
--- a/llvm/test/CodeGen/X86/widen_fsub.ll
+++ b/llvm/test/CodeGen/X86/widen_fsub.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=SSE
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx  | FileCheck %s --check-prefixes=AVX,AVX1OR2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512F
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx  | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512,AVX512VL
 
 define void @widen_fsub_v2f32_v4f32(ptr %a0, ptr %b0, ptr %c0) {
 ; SSE-LABEL: widen_fsub_v2f32_v4f32:
@@ -364,3 +364,69 @@ define void @widen_fsub_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
   store <2 x float> %vc14, ptr %c14, align 4
   ret void
 }
+
+define <8 x float> @widen_fsub_v4f32_v8f32_const(<4 x float> %x, <4 x float> %y) {
+; SSE-LABEL: widen_fsub_v4f32_v8f32_const:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps {{.*#+}} xmm2 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; SSE-NEXT:    subps %xmm2, %xmm0
+; SSE-NEXT:    subps %xmm2, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: widen_fsub_v4f32_v8f32_const:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vbroadcastss {{.*#+}} xmm2 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; AVX-NEXT:    vsubps %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vsubps %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
+  %x2 = fsub <4 x float> %x, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %y2 = fsub <4 x float> %y, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %r = shufflevector <4 x float> %x2, <4 x float> %y2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x float> %r
+}
+
+define <16 x float> @widen_fsub_v4f32_v16f32_const(<4 x float> %x, <4 x float> %y, <4 x float> %z, <4 x float> %w) {
+; SSE-LABEL: widen_fsub_v4f32_v16f32_const:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps {{.*#+}} xmm4 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; SSE-NEXT:    subps %xmm4, %xmm0
+; SSE-NEXT:    subps %xmm4, %xmm1
+; SSE-NEXT:    subps %xmm4, %xmm2
+; SSE-NEXT:    subps %xmm4, %xmm3
+; SSE-NEXT:    retq
+;
+; AVX1OR2-LABEL: widen_fsub_v4f32_v16f32_const:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vbroadcastss {{.*#+}} xmm4 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; AVX1OR2-NEXT:    vsubps %xmm4, %xmm0, %xmm0
+; AVX1OR2-NEXT:    vsubps %xmm4, %xmm1, %xmm1
+; AVX1OR2-NEXT:    vsubps %xmm4, %xmm2, %xmm2
+; AVX1OR2-NEXT:    vsubps %xmm4, %xmm3, %xmm3
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm1
+; AVX1OR2-NEXT:    retq
+;
+; AVX512-LABEL: widen_fsub_v4f32_v16f32_const:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vbroadcastss {{.*#+}} xmm4 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; AVX512-NEXT:    vsubps %xmm4, %xmm0, %xmm0
+; AVX512-NEXT:    vsubps %xmm4, %xmm1, %xmm1
+; AVX512-NEXT:    vsubps %xmm4, %xmm2, %xmm2
+; AVX512-NEXT:    vsubps %xmm4, %xmm3, %xmm3
+; AVX512-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %x2 = fsub <4 x float> %x, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %y2 = fsub <4 x float> %y, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %z2 = fsub <4 x float> %z, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %w2 = fsub <4 x float> %w, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %r0 = shufflevector <4 x float> %x2, <4 x float> %y2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %r1 = shufflevector <4 x float> %z2, <4 x float> %w2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %r = shufflevector <8 x float> %r0, <8 x float> %r1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  ret <16 x float> %r
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; AVX1: {{.*}}
+; AVX2: {{.*}}
