[llvm] 9ebd0db - [NFC][Codegen][X86] Improve test coverage for insertions into XMM vector
Roman Lebedev via llvm-commits
llvm-commits at lists.llvm.org
Sun Jul 25 11:08:28 PDT 2021
Author: Roman Lebedev
Date: 2021-07-25T21:08:03+03:00
New Revision: 9ebd0dbf0f69f2b2ccc71c13d698748afd0ab626
URL: https://github.com/llvm/llvm-project/commit/9ebd0dbf0f69f2b2ccc71c13d698748afd0ab626
DIFF: https://github.com/llvm/llvm-project/commit/9ebd0dbf0f69f2b2ccc71c13d698748afd0ab626.diff
LOG: [NFC][Codegen][X86] Improve test coverage for insertions into XMM vector
Added:
llvm/test/CodeGen/X86/sse-insertelt-from-mem.ll
llvm/test/CodeGen/X86/sse-insertelt.ll
Modified:
llvm/test/CodeGen/X86/avx-insertelt.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/avx-insertelt.ll b/llvm/test/CodeGen/X86/avx-insertelt.ll
index 2229f6aa6c76..3f5d004841e8 100644
--- a/llvm/test/CodeGen/X86/avx-insertelt.ll
+++ b/llvm/test/CodeGen/X86/avx-insertelt.ll
@@ -4,27 +4,27 @@
; 0'th element insertion into an AVX register.
-define <8 x float> @insert_f32_firstelt_of_low_subvector(<8 x float> %y, float %f, <8 x float> %x) {
+define <8 x float> @insert_f32_firstelt_of_low_subvector(<8 x float> %x, float %s) {
; ALL-LABEL: insert_f32_firstelt_of_low_subvector:
; ALL: # %bb.0:
; ALL-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
; ALL-NEXT: retq
- %i0 = insertelement <8 x float> %y, float %f, i32 0
+ %i0 = insertelement <8 x float> %x, float %s, i32 0
ret <8 x float> %i0
}
-define <4 x double> @insert_f64_firstelt_of_low_subvector(<4 x double> %y, double %f, <4 x double> %x) {
+define <4 x double> @insert_f64_firstelt_of_low_subvector(<4 x double> %x, double %s) {
; ALL-LABEL: insert_f64_firstelt_of_low_subvector:
; ALL: # %bb.0:
; ALL-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; ALL-NEXT: retq
- %i0 = insertelement <4 x double> %y, double %f, i32 0
+ %i0 = insertelement <4 x double> %x, double %s, i32 0
ret <4 x double> %i0
}
-define <32 x i8> @insert_i8_firstelt_of_low_subvector(<32 x i8> %y, i8 %f, <32 x i8> %x) {
+define <32 x i8> @insert_i8_firstelt_of_low_subvector(<32 x i8> %x, i8 %s) {
; AVX-LABEL: insert_i8_firstelt_of_low_subvector:
; AVX: # %bb.0:
; AVX-NEXT: vpinsrb $0, %edi, %xmm0, %xmm1
@@ -36,11 +36,11 @@ define <32 x i8> @insert_i8_firstelt_of_low_subvector(<32 x i8> %y, i8 %f, <32 x
; AVX2-NEXT: vpinsrb $0, %edi, %xmm0, %xmm1
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: retq
- %i0 = insertelement <32 x i8> %y, i8 %f, i32 0
+ %i0 = insertelement <32 x i8> %x, i8 %s, i32 0
ret <32 x i8> %i0
}
-define <16 x i16> @insert_i16_firstelt_of_low_subvector(<16 x i16> %y, i16 %f, <16 x i16> %x) {
+define <16 x i16> @insert_i16_firstelt_of_low_subvector(<16 x i16> %x, i16 %s) {
; AVX-LABEL: insert_i16_firstelt_of_low_subvector:
; AVX: # %bb.0:
; AVX-NEXT: vpinsrw $0, %edi, %xmm0, %xmm1
@@ -52,11 +52,11 @@ define <16 x i16> @insert_i16_firstelt_of_low_subvector(<16 x i16> %y, i16 %f, <
; AVX2-NEXT: vpinsrw $0, %edi, %xmm0, %xmm1
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: retq
- %i0 = insertelement <16 x i16> %y, i16 %f, i32 0
+ %i0 = insertelement <16 x i16> %x, i16 %s, i32 0
ret <16 x i16> %i0
}
-define <8 x i32> @insert_i32_firstelt_of_low_subvector(<8 x i32> %y, i32 %f, <8 x i32> %x) {
+define <8 x i32> @insert_i32_firstelt_of_low_subvector(<8 x i32> %x, i32 %s) {
; AVX-LABEL: insert_i32_firstelt_of_low_subvector:
; AVX: # %bb.0:
; AVX-NEXT: vpinsrd $0, %edi, %xmm0, %xmm1
@@ -68,11 +68,11 @@ define <8 x i32> @insert_i32_firstelt_of_low_subvector(<8 x i32> %y, i32 %f, <8
; AVX2-NEXT: vmovd %edi, %xmm1
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
; AVX2-NEXT: retq
- %i0 = insertelement <8 x i32> %y, i32 %f, i32 0
+ %i0 = insertelement <8 x i32> %x, i32 %s, i32 0
ret <8 x i32> %i0
}
-define <4 x i64> @insert_i64_firstelt_of_low_subvector(<4 x i64> %y, i64 %f, <4 x i64> %x) {
+define <4 x i64> @insert_i64_firstelt_of_low_subvector(<4 x i64> %x, i64 %s) {
; AVX-LABEL: insert_i64_firstelt_of_low_subvector:
; AVX: # %bb.0:
; AVX-NEXT: vpinsrq $0, %rdi, %xmm0, %xmm1
@@ -84,35 +84,35 @@ define <4 x i64> @insert_i64_firstelt_of_low_subvector(<4 x i64> %y, i64 %f, <4
; AVX2-NEXT: vpinsrq $0, %rdi, %xmm0, %xmm1
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: retq
- %i0 = insertelement <4 x i64> %y, i64 %f, i32 0
+ %i0 = insertelement <4 x i64> %x, i64 %s, i32 0
ret <4 x i64> %i0
}
; 0'th element of high subvector insertion into an AVX register.
-define <8 x float> @insert_f32_firstelt_of_high_subvector(<8 x float> %y, float %f, <8 x float> %x) {
+define <8 x float> @insert_f32_firstelt_of_high_subvector(<8 x float> %x, float %s) {
; ALL-LABEL: insert_f32_firstelt_of_high_subvector:
; ALL: # %bb.0:
; ALL-NEXT: vextractf128 $1, %ymm0, %xmm2
; ALL-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3]
; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; ALL-NEXT: retq
- %i0 = insertelement <8 x float> %y, float %f, i32 4
+ %i0 = insertelement <8 x float> %x, float %s, i32 4
ret <8 x float> %i0
}
-define <4 x double> @insert_f64_firstelt_of_high_subvector(<4 x double> %y, double %f, <4 x double> %x) {
+define <4 x double> @insert_f64_firstelt_of_high_subvector(<4 x double> %x, double %s) {
; ALL-LABEL: insert_f64_firstelt_of_high_subvector:
; ALL: # %bb.0:
; ALL-NEXT: vextractf128 $1, %ymm0, %xmm2
; ALL-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3]
; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; ALL-NEXT: retq
- %i0 = insertelement <4 x double> %y, double %f, i32 2
+ %i0 = insertelement <4 x double> %x, double %s, i32 2
ret <4 x double> %i0
}
-define <32 x i8> @insert_i8_firstelt_of_high_subvector(<32 x i8> %y, i8 %f, <32 x i8> %x) {
+define <32 x i8> @insert_i8_firstelt_of_high_subvector(<32 x i8> %x, i8 %s) {
; AVX-LABEL: insert_i8_firstelt_of_high_subvector:
; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
@@ -126,11 +126,11 @@ define <32 x i8> @insert_i8_firstelt_of_high_subvector(<32 x i8> %y, i8 %f, <32
; AVX2-NEXT: vpinsrb $0, %edi, %xmm1, %xmm1
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
- %i0 = insertelement <32 x i8> %y, i8 %f, i32 16
+ %i0 = insertelement <32 x i8> %x, i8 %s, i32 16
ret <32 x i8> %i0
}
-define <16 x i16> @insert_i16_firstelt_of_high_subvector(<16 x i16> %y, i16 %f, <16 x i16> %x) {
+define <16 x i16> @insert_i16_firstelt_of_high_subvector(<16 x i16> %x, i16 %s) {
; AVX-LABEL: insert_i16_firstelt_of_high_subvector:
; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
@@ -144,11 +144,11 @@ define <16 x i16> @insert_i16_firstelt_of_high_subvector(<16 x i16> %y, i16 %f,
; AVX2-NEXT: vpinsrw $0, %edi, %xmm1, %xmm1
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
- %i0 = insertelement <16 x i16> %y, i16 %f, i32 8
+ %i0 = insertelement <16 x i16> %x, i16 %s, i32 8
ret <16 x i16> %i0
}
-define <8 x i32> @insert_i32_firstelt_of_high_subvector(<8 x i32> %y, i32 %f, <8 x i32> %x) {
+define <8 x i32> @insert_i32_firstelt_of_high_subvector(<8 x i32> %x, i32 %s) {
; AVX-LABEL: insert_i32_firstelt_of_high_subvector:
; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
@@ -162,11 +162,11 @@ define <8 x i32> @insert_i32_firstelt_of_high_subvector(<8 x i32> %y, i32 %f, <8
; AVX2-NEXT: vpinsrd $0, %edi, %xmm1, %xmm1
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
- %i0 = insertelement <8 x i32> %y, i32 %f, i32 4
+ %i0 = insertelement <8 x i32> %x, i32 %s, i32 4
ret <8 x i32> %i0
}
-define <4 x i64> @insert_i64_firstelt_of_high_subvector(<4 x i64> %y, i64 %f, <4 x i64> %x) {
+define <4 x i64> @insert_i64_firstelt_of_high_subvector(<4 x i64> %x, i64 %s) {
; AVX-LABEL: insert_i64_firstelt_of_high_subvector:
; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
@@ -180,13 +180,13 @@ define <4 x i64> @insert_i64_firstelt_of_high_subvector(<4 x i64> %y, i64 %f, <4
; AVX2-NEXT: vpinsrq $0, %rdi, %xmm1, %xmm1
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
- %i0 = insertelement <4 x i64> %y, i64 %f, i32 2
+ %i0 = insertelement <4 x i64> %x, i64 %s, i32 2
ret <4 x i64> %i0
}
; element insertion into 0'th element of both subvectors
-define <8 x float> @insert_f32_firstelts(<8 x float> %y, float %f, <8 x float> %x) {
+define <8 x float> @insert_f32_firstelts(<8 x float> %x, float %s) {
; ALL-LABEL: insert_f32_firstelts:
; ALL: # %bb.0:
; ALL-NEXT: vblendps {{.*#+}} xmm2 = xmm1[0],xmm0[1,2,3]
@@ -194,12 +194,12 @@ define <8 x float> @insert_f32_firstelts(<8 x float> %y, float %f, <8 x float> %
; ALL-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
; ALL-NEXT: retq
- %i0 = insertelement <8 x float> %y, float %f, i32 0
- %i1 = insertelement <8 x float> %i0, float %f, i32 4
+ %i0 = insertelement <8 x float> %x, float %s, i32 0
+ %i1 = insertelement <8 x float> %i0, float %s, i32 4
ret <8 x float> %i1
}
-define <4 x double> @insert_f64_firstelts(<4 x double> %y, double %f, <4 x double> %x) {
+define <4 x double> @insert_f64_firstelts(<4 x double> %x, double %s) {
; ALL-LABEL: insert_f64_firstelts:
; ALL: # %bb.0:
; ALL-NEXT: vblendps {{.*#+}} xmm2 = xmm1[0,1],xmm0[2,3]
@@ -207,12 +207,12 @@ define <4 x double> @insert_f64_firstelts(<4 x double> %y, double %f, <4 x doubl
; ALL-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
; ALL-NEXT: retq
- %i0 = insertelement <4 x double> %y, double %f, i32 0
- %i1 = insertelement <4 x double> %i0, double %f, i32 2
+ %i0 = insertelement <4 x double> %x, double %s, i32 0
+ %i1 = insertelement <4 x double> %i0, double %s, i32 2
ret <4 x double> %i1
}
-define <32 x i8> @insert_i8_firstelts(<32 x i8> %y, i8 %f, <32 x i8> %x) {
+define <32 x i8> @insert_i8_firstelts(<32 x i8> %x, i8 %s) {
; AVX-LABEL: insert_i8_firstelts:
; AVX: # %bb.0:
; AVX-NEXT: vpinsrb $0, %edi, %xmm0, %xmm1
@@ -228,12 +228,12 @@ define <32 x i8> @insert_i8_firstelts(<32 x i8> %y, i8 %f, <32 x i8> %x) {
; AVX2-NEXT: vpinsrb $0, %edi, %xmm0, %xmm0
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: retq
- %i0 = insertelement <32 x i8> %y, i8 %f, i32 0
- %i1 = insertelement <32 x i8> %i0, i8 %f, i32 16
+ %i0 = insertelement <32 x i8> %x, i8 %s, i32 0
+ %i1 = insertelement <32 x i8> %i0, i8 %s, i32 16
ret <32 x i8> %i1
}
-define <16 x i16> @insert_i16_firstelts(<16 x i16> %y, i16 %f, <16 x i16> %x) {
+define <16 x i16> @insert_i16_firstelts(<16 x i16> %x, i16 %s) {
; AVX-LABEL: insert_i16_firstelts:
; AVX: # %bb.0:
; AVX-NEXT: vpinsrw $0, %edi, %xmm0, %xmm1
@@ -249,12 +249,12 @@ define <16 x i16> @insert_i16_firstelts(<16 x i16> %y, i16 %f, <16 x i16> %x) {
; AVX2-NEXT: vpinsrw $0, %edi, %xmm0, %xmm0
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: retq
- %i0 = insertelement <16 x i16> %y, i16 %f, i32 0
- %i1 = insertelement <16 x i16> %i0, i16 %f, i32 8
+ %i0 = insertelement <16 x i16> %x, i16 %s, i32 0
+ %i1 = insertelement <16 x i16> %i0, i16 %s, i32 8
ret <16 x i16> %i1
}
-define <8 x i32> @insert_i32_firstelts(<8 x i32> %y, i32 %f, <8 x i32> %x) {
+define <8 x i32> @insert_i32_firstelts(<8 x i32> %x, i32 %s) {
; AVX-LABEL: insert_i32_firstelts:
; AVX: # %bb.0:
; AVX-NEXT: vpinsrd $0, %edi, %xmm0, %xmm1
@@ -271,12 +271,12 @@ define <8 x i32> @insert_i32_firstelts(<8 x i32> %y, i32 %f, <8 x i32> %x) {
; AVX2-NEXT: vpinsrd $0, %edi, %xmm0, %xmm0
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: retq
- %i0 = insertelement <8 x i32> %y, i32 %f, i32 0
- %i1 = insertelement <8 x i32> %i0, i32 %f, i32 4
+ %i0 = insertelement <8 x i32> %x, i32 %s, i32 0
+ %i1 = insertelement <8 x i32> %i0, i32 %s, i32 4
ret <8 x i32> %i1
}
-define <4 x i64> @insert_i64_firstelts(<4 x i64> %y, i64 %f, <4 x i64> %x) {
+define <4 x i64> @insert_i64_firstelts(<4 x i64> %x, i64 %s) {
; AVX-LABEL: insert_i64_firstelts:
; AVX: # %bb.0:
; AVX-NEXT: vpinsrq $0, %rdi, %xmm0, %xmm1
@@ -292,37 +292,37 @@ define <4 x i64> @insert_i64_firstelts(<4 x i64> %y, i64 %f, <4 x i64> %x) {
; AVX2-NEXT: vpinsrq $0, %rdi, %xmm0, %xmm0
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: retq
- %i0 = insertelement <4 x i64> %y, i64 %f, i32 0
- %i1 = insertelement <4 x i64> %i0, i64 %f, i32 2
+ %i0 = insertelement <4 x i64> %x, i64 %s, i32 0
+ %i1 = insertelement <4 x i64> %i0, i64 %s, i32 2
ret <4 x i64> %i1
}
; element insertion into two elements of high subvector
-define <8 x float> @insert_f32_two_elts_of_high_subvector(<8 x float> %y, float %f, <8 x float> %x) {
+define <8 x float> @insert_f32_two_elts_of_high_subvector(<8 x float> %x, float %s) {
; ALL-LABEL: insert_f32_two_elts_of_high_subvector:
; ALL: # %bb.0:
; ALL-NEXT: vextractf128 $1, %ymm0, %xmm2
; ALL-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,0],xmm2[2,3]
; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; ALL-NEXT: retq
- %i0 = insertelement <8 x float> %y, float %f, i32 4
- %i1 = insertelement <8 x float> %i0, float %f, i32 5
+ %i0 = insertelement <8 x float> %x, float %s, i32 4
+ %i1 = insertelement <8 x float> %i0, float %s, i32 5
ret <8 x float> %i1
}
-define <4 x double> @insert_f64_two_elts_of_high_subvector(<4 x double> %y, double %f, <4 x double> %x) {
+define <4 x double> @insert_f64_two_elts_of_high_subvector(<4 x double> %x, double %s) {
; ALL-LABEL: insert_f64_two_elts_of_high_subvector:
; ALL: # %bb.0:
; ALL-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0]
; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; ALL-NEXT: retq
- %i0 = insertelement <4 x double> %y, double %f, i32 2
- %i1 = insertelement <4 x double> %i0, double %f, i32 3
+ %i0 = insertelement <4 x double> %x, double %s, i32 2
+ %i1 = insertelement <4 x double> %i0, double %s, i32 3
ret <4 x double> %i1
}
-define <32 x i8> @insert_i8_two_elts_of_high_subvector(<32 x i8> %y, i8 %f, <32 x i8> %x) {
+define <32 x i8> @insert_i8_two_elts_of_high_subvector(<32 x i8> %x, i8 %s) {
; AVX-LABEL: insert_i8_two_elts_of_high_subvector:
; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
@@ -338,12 +338,12 @@ define <32 x i8> @insert_i8_two_elts_of_high_subvector(<32 x i8> %y, i8 %f, <32
; AVX2-NEXT: vpinsrb $1, %edi, %xmm1, %xmm1
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
- %i0 = insertelement <32 x i8> %y, i8 %f, i32 16
- %i1 = insertelement <32 x i8> %i0, i8 %f, i32 17
+ %i0 = insertelement <32 x i8> %x, i8 %s, i32 16
+ %i1 = insertelement <32 x i8> %i0, i8 %s, i32 17
ret <32 x i8> %i1
}
-define <16 x i16> @insert_i16_two_elts_of_high_subvector(<16 x i16> %y, i16 %f, <16 x i16> %x) {
+define <16 x i16> @insert_i16_two_elts_of_high_subvector(<16 x i16> %x, i16 %s) {
; AVX-LABEL: insert_i16_two_elts_of_high_subvector:
; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
@@ -359,12 +359,12 @@ define <16 x i16> @insert_i16_two_elts_of_high_subvector(<16 x i16> %y, i16 %f,
; AVX2-NEXT: vpinsrw $1, %edi, %xmm1, %xmm1
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
- %i0 = insertelement <16 x i16> %y, i16 %f, i32 8
- %i1 = insertelement <16 x i16> %i0, i16 %f, i32 9
+ %i0 = insertelement <16 x i16> %x, i16 %s, i32 8
+ %i1 = insertelement <16 x i16> %i0, i16 %s, i32 9
ret <16 x i16> %i1
}
-define <8 x i32> @insert_i32_two_elts_of_high_subvector(<8 x i32> %y, i32 %f, <8 x i32> %x) {
+define <8 x i32> @insert_i32_two_elts_of_high_subvector(<8 x i32> %x, i32 %s) {
; AVX-LABEL: insert_i32_two_elts_of_high_subvector:
; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
@@ -380,12 +380,12 @@ define <8 x i32> @insert_i32_two_elts_of_high_subvector(<8 x i32> %y, i32 %f, <8
; AVX2-NEXT: vpinsrd $1, %edi, %xmm1, %xmm1
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
- %i0 = insertelement <8 x i32> %y, i32 %f, i32 4
- %i1 = insertelement <8 x i32> %i0, i32 %f, i32 5
+ %i0 = insertelement <8 x i32> %x, i32 %s, i32 4
+ %i1 = insertelement <8 x i32> %i0, i32 %s, i32 5
ret <8 x i32> %i1
}
-define <4 x i64> @insert_i64_two_elts_of_high_subvector(<4 x i64> %y, i64 %f, <4 x i64> %x) {
+define <4 x i64> @insert_i64_two_elts_of_high_subvector(<4 x i64> %x, i64 %s) {
; AVX-LABEL: insert_i64_two_elts_of_high_subvector:
; AVX: # %bb.0:
; AVX-NEXT: vpinsrq $0, %rdi, %xmm0, %xmm1
@@ -399,36 +399,36 @@ define <4 x i64> @insert_i64_two_elts_of_high_subvector(<4 x i64> %y, i64 %f, <4
; AVX2-NEXT: vpinsrq $1, %rdi, %xmm1, %xmm1
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
- %i0 = insertelement <4 x i64> %y, i64 %f, i32 2
- %i1 = insertelement <4 x i64> %i0, i64 %f, i32 3
+ %i0 = insertelement <4 x i64> %x, i64 %s, i32 2
+ %i1 = insertelement <4 x i64> %i0, i64 %s, i32 3
ret <4 x i64> %i1
}
; element insertion into two elements of low subvector
-define <8 x float> @insert_f32_two_elts_of_low_subvector(<8 x float> %y, float %f, <8 x float> %x) {
+define <8 x float> @insert_f32_two_elts_of_low_subvector(<8 x float> %x, float %s) {
; ALL-LABEL: insert_f32_two_elts_of_low_subvector:
; ALL: # %bb.0:
; ALL-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,3]
; ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; ALL-NEXT: retq
- %i0 = insertelement <8 x float> %y, float %f, i32 0
- %i1 = insertelement <8 x float> %i0, float %f, i32 1
+ %i0 = insertelement <8 x float> %x, float %s, i32 0
+ %i1 = insertelement <8 x float> %i0, float %s, i32 1
ret <8 x float> %i1
}
-define <4 x double> @insert_f64_two_elts_of_low_subvector(<4 x double> %y, double %f, <4 x double> %x) {
+define <4 x double> @insert_f64_two_elts_of_low_subvector(<4 x double> %x, double %s) {
; ALL-LABEL: insert_f64_two_elts_of_low_subvector:
; ALL: # %bb.0:
; ALL-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0]
; ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; ALL-NEXT: retq
- %i0 = insertelement <4 x double> %y, double %f, i32 0
- %i1 = insertelement <4 x double> %i0, double %f, i32 1
+ %i0 = insertelement <4 x double> %x, double %s, i32 0
+ %i1 = insertelement <4 x double> %i0, double %s, i32 1
ret <4 x double> %i1
}
-define <32 x i8> @insert_i8_two_elts_of_low_subvector(<32 x i8> %y, i8 %f, <32 x i8> %x) {
+define <32 x i8> @insert_i8_two_elts_of_low_subvector(<32 x i8> %x, i8 %s) {
; AVX-LABEL: insert_i8_two_elts_of_low_subvector:
; AVX: # %bb.0:
; AVX-NEXT: vpinsrb $0, %edi, %xmm0, %xmm1
@@ -442,12 +442,12 @@ define <32 x i8> @insert_i8_two_elts_of_low_subvector(<32 x i8> %y, i8 %f, <32 x
; AVX2-NEXT: vpinsrb $1, %edi, %xmm1, %xmm1
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: retq
- %i0 = insertelement <32 x i8> %y, i8 %f, i32 0
- %i1 = insertelement <32 x i8> %i0, i8 %f, i32 1
+ %i0 = insertelement <32 x i8> %x, i8 %s, i32 0
+ %i1 = insertelement <32 x i8> %i0, i8 %s, i32 1
ret <32 x i8> %i1
}
-define <16 x i16> @insert_i16_two_elts_of_low_subvector(<16 x i16> %y, i16 %f, <16 x i16> %x) {
+define <16 x i16> @insert_i16_two_elts_of_low_subvector(<16 x i16> %x, i16 %s) {
; AVX-LABEL: insert_i16_two_elts_of_low_subvector:
; AVX: # %bb.0:
; AVX-NEXT: vpinsrw $0, %edi, %xmm0, %xmm1
@@ -461,12 +461,12 @@ define <16 x i16> @insert_i16_two_elts_of_low_subvector(<16 x i16> %y, i16 %f, <
; AVX2-NEXT: vpinsrw $1, %edi, %xmm1, %xmm1
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: retq
- %i0 = insertelement <16 x i16> %y, i16 %f, i32 0
- %i1 = insertelement <16 x i16> %i0, i16 %f, i32 1
+ %i0 = insertelement <16 x i16> %x, i16 %s, i32 0
+ %i1 = insertelement <16 x i16> %i0, i16 %s, i32 1
ret <16 x i16> %i1
}
-define <8 x i32> @insert_i32_two_elts_of_low_subvector(<8 x i32> %y, i32 %f, <8 x i32> %x) {
+define <8 x i32> @insert_i32_two_elts_of_low_subvector(<8 x i32> %x, i32 %s) {
; AVX-LABEL: insert_i32_two_elts_of_low_subvector:
; AVX: # %bb.0:
; AVX-NEXT: vpinsrd $0, %edi, %xmm0, %xmm1
@@ -481,12 +481,12 @@ define <8 x i32> @insert_i32_two_elts_of_low_subvector(<8 x i32> %y, i32 %f, <8
; AVX2-NEXT: vpinsrd $1, %edi, %xmm1, %xmm1
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: retq
- %i0 = insertelement <8 x i32> %y, i32 %f, i32 0
- %i1 = insertelement <8 x i32> %i0, i32 %f, i32 1
+ %i0 = insertelement <8 x i32> %x, i32 %s, i32 0
+ %i1 = insertelement <8 x i32> %i0, i32 %s, i32 1
ret <8 x i32> %i1
}
-define <4 x i64> @insert_i64_two_elts_of_low_subvector(<4 x i64> %y, i64 %f, <4 x i64> %x) {
+define <4 x i64> @insert_i64_two_elts_of_low_subvector(<4 x i64> %x, i64 %s) {
; AVX-LABEL: insert_i64_two_elts_of_low_subvector:
; AVX: # %bb.0:
; AVX-NEXT: vpinsrq $0, %rdi, %xmm0, %xmm1
@@ -500,7 +500,7 @@ define <4 x i64> @insert_i64_two_elts_of_low_subvector(<4 x i64> %y, i64 %f, <4
; AVX2-NEXT: vpinsrq $1, %rdi, %xmm1, %xmm1
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: retq
- %i0 = insertelement <4 x i64> %y, i64 %f, i32 0
- %i1 = insertelement <4 x i64> %i0, i64 %f, i32 1
+ %i0 = insertelement <4 x i64> %x, i64 %s, i32 0
+ %i1 = insertelement <4 x i64> %i0, i64 %s, i32 1
ret <4 x i64> %i1
}
diff --git a/llvm/test/CodeGen/X86/sse-insertelt-from-mem.ll b/llvm/test/CodeGen/X86/sse-insertelt-from-mem.ll
new file mode 100644
index 000000000000..7e48f5d4afe1
--- /dev/null
+++ b/llvm/test/CodeGen/X86/sse-insertelt-from-mem.ll
@@ -0,0 +1,469 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX
+
+; 0'th element insertion into an SSE register.
+
+define <4 x float> @insert_f32_firstelt(<4 x float> %x, float* %s.addr) {
+; SSE2-LABEL: insert_f32_firstelt:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: insert_f32_firstelt:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: insert_f32_firstelt:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; AVX-NEXT: retq
+ %s = load float, float* %s.addr
+ %i0 = insertelement <4 x float> %x, float %s, i32 0
+ ret <4 x float> %i0
+}
+
+define <2 x double> @insert_f64_firstelt(<2 x double> %x, double* %s.addr) {
+; SSE-LABEL: insert_f64_firstelt:
+; SSE: # %bb.0:
+; SSE-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: insert_f64_firstelt:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
+; AVX-NEXT: retq
+ %s = load double, double* %s.addr
+ %i0 = insertelement <2 x double> %x, double %s, i32 0
+ ret <2 x double> %i0
+}
+
+define <16 x i8> @insert_i8_firstelt(<16 x i8> %x, i8* %s.addr) {
+; SSE2-LABEL: insert_i8_firstelt:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: movzbl (%rdi), %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: pandn %xmm2, %xmm1
+; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: insert_i8_firstelt:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pinsrb $0, (%rdi), %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: insert_i8_firstelt:
+; AVX: # %bb.0:
+; AVX-NEXT: vpinsrb $0, (%rdi), %xmm0, %xmm0
+; AVX-NEXT: retq
+ %s = load i8, i8* %s.addr
+ %i0 = insertelement <16 x i8> %x, i8 %s, i32 0
+ ret <16 x i8> %i0
+}
+
+define <8 x i16> @insert_i16_firstelt(<8 x i16> %x, i16* %s.addr) {
+; SSE-LABEL: insert_i16_firstelt:
+; SSE: # %bb.0:
+; SSE-NEXT: pinsrw $0, (%rdi), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: insert_i16_firstelt:
+; AVX: # %bb.0:
+; AVX-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm0
+; AVX-NEXT: retq
+ %s = load i16, i16* %s.addr
+ %i0 = insertelement <8 x i16> %x, i16 %s, i32 0
+ ret <8 x i16> %i0
+}
+
+define <4 x i32> @insert_i32_firstelt(<4 x i32> %x, i32* %s.addr) {
+; SSE2-LABEL: insert_i32_firstelt:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: insert_i32_firstelt:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pinsrd $0, (%rdi), %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: insert_i32_firstelt:
+; AVX: # %bb.0:
+; AVX-NEXT: vpinsrd $0, (%rdi), %xmm0, %xmm0
+; AVX-NEXT: retq
+ %s = load i32, i32* %s.addr
+ %i0 = insertelement <4 x i32> %x, i32 %s, i32 0
+ ret <4 x i32> %i0
+}
+
+define <2 x i64> @insert_i64_firstelt(<2 x i64> %x, i64* %s.addr) {
+; SSE2-LABEL: insert_i64_firstelt:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: insert_i64_firstelt:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pinsrq $0, (%rdi), %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: insert_i64_firstelt:
+; AVX: # %bb.0:
+; AVX-NEXT: vpinsrq $0, (%rdi), %xmm0, %xmm0
+; AVX-NEXT: retq
+ %s = load i64, i64* %s.addr
+ %i0 = insertelement <2 x i64> %x, i64 %s, i32 0
+ ret <2 x i64> %i0
+}
+
+; 1'th element insertion.
+
+define <4 x float> @insert_f32_secondelt(<4 x float> %x, float* %s.addr) {
+; SSE2-LABEL: insert_f32_secondelt:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: insert_f32_secondelt:
+; SSE41: # %bb.0:
+; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: insert_f32_secondelt:
+; AVX: # %bb.0:
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; AVX-NEXT: retq
+ %s = load float, float* %s.addr
+ %i0 = insertelement <4 x float> %x, float %s, i32 1
+ ret <4 x float> %i0
+}
+
+define <2 x double> @insert_f64_secondelt(<2 x double> %x, double* %s.addr) {
+; SSE-LABEL: insert_f64_secondelt:
+; SSE: # %bb.0:
+; SSE-NEXT: movhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: insert_f64_secondelt:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
+; AVX-NEXT: retq
+ %s = load double, double* %s.addr
+ %i0 = insertelement <2 x double> %x, double %s, i32 1
+ ret <2 x double> %i0
+}
+
+define <16 x i8> @insert_i8_secondelt(<16 x i8> %x, i8* %s.addr) {
+; SSE2-LABEL: insert_i8_secondelt:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: movzbl (%rdi), %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: psllw $8, %xmm2
+; SSE2-NEXT: pandn %xmm2, %xmm1
+; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: insert_i8_secondelt:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pinsrb $1, (%rdi), %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: insert_i8_secondelt:
+; AVX: # %bb.0:
+; AVX-NEXT: vpinsrb $1, (%rdi), %xmm0, %xmm0
+; AVX-NEXT: retq
+ %s = load i8, i8* %s.addr
+ %i0 = insertelement <16 x i8> %x, i8 %s, i32 1
+ ret <16 x i8> %i0
+}
+
+define <8 x i16> @insert_i16_secondelt(<8 x i16> %x, i16* %s.addr) {
+; SSE-LABEL: insert_i16_secondelt:
+; SSE: # %bb.0:
+; SSE-NEXT: pinsrw $1, (%rdi), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: insert_i16_secondelt:
+; AVX: # %bb.0:
+; AVX-NEXT: vpinsrw $1, (%rdi), %xmm0, %xmm0
+; AVX-NEXT: retq
+ %s = load i16, i16* %s.addr
+ %i0 = insertelement <8 x i16> %x, i16 %s, i32 1
+ ret <8 x i16> %i0
+}
+
+define <4 x i32> @insert_i32_secondelt(<4 x i32> %x, i32* %s.addr) {
+; SSE2-LABEL: insert_i32_secondelt:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: insert_i32_secondelt:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pinsrd $1, (%rdi), %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: insert_i32_secondelt:
+; AVX: # %bb.0:
+; AVX-NEXT: vpinsrd $1, (%rdi), %xmm0, %xmm0
+; AVX-NEXT: retq
+ %s = load i32, i32* %s.addr
+ %i0 = insertelement <4 x i32> %x, i32 %s, i32 1
+ ret <4 x i32> %i0
+}
+
+define <2 x i64> @insert_i64_secondelt(<2 x i64> %x, i64* %s.addr) {
+; SSE2-LABEL: insert_i64_secondelt:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: insert_i64_secondelt:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pinsrq $1, (%rdi), %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: insert_i64_secondelt:
+; AVX: # %bb.0:
+; AVX-NEXT: vpinsrq $1, (%rdi), %xmm0, %xmm0
+; AVX-NEXT: retq
+ %s = load i64, i64* %s.addr
+ %i0 = insertelement <2 x i64> %x, i64 %s, i32 1
+ ret <2 x i64> %i0
+}
+
+; element insertion into two elements
+
+define <4 x float> @insert_f32_two_elts(<4 x float> %x, float* %s.addr) {
+; SSE-LABEL: insert_f32_two_elts:
+; SSE: # %bb.0:
+; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,3]
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: insert_f32_two_elts:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,0],xmm0[2,3]
+; AVX-NEXT: retq
+ %s = load float, float* %s.addr
+ %i0 = insertelement <4 x float> %x, float %s, i32 0
+ %i1 = insertelement <4 x float> %i0, float %s, i32 1
+ ret <4 x float> %i1
+}
+
+define <2 x double> @insert_f64_two_elts(<2 x double> %x, double* %s.addr) {
+; SSE2-LABEL: insert_f64_two_elts:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: insert_f64_two_elts:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: insert_f64_two_elts:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
+; AVX-NEXT: retq
+ %s = load double, double* %s.addr
+ %i0 = insertelement <2 x double> %x, double %s, i32 0
+ %i1 = insertelement <2 x double> %i0, double %s, i32 1
+ ret <2 x double> %i1
+}
+
+define <16 x i8> @insert_i8_two_elts(<16 x i8> %x, i8* %s.addr) {
+; SSE2-LABEL: insert_i8_two_elts:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: movzbl (%rdi), %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: pandn %xmm2, %xmm1
+; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: psllw $8, %xmm2
+; SSE2-NEXT: pandn %xmm2, %xmm1
+; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: insert_i8_two_elts:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movzbl (%rdi), %eax
+; SSE41-NEXT: pinsrb $0, %eax, %xmm0
+; SSE41-NEXT: pinsrb $1, %eax, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: insert_i8_two_elts:
+; AVX: # %bb.0:
+; AVX-NEXT: movzbl (%rdi), %eax
+; AVX-NEXT: vpinsrb $0, %eax, %xmm0, %xmm0
+; AVX-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %s = load i8, i8* %s.addr
+ %i0 = insertelement <16 x i8> %x, i8 %s, i32 0
+ %i1 = insertelement <16 x i8> %i0, i8 %s, i32 1
+ ret <16 x i8> %i1
+}
+
+define <8 x i16> @insert_i16_two_elts(<8 x i16> %x, i16* %s.addr) {
+; SSE-LABEL: insert_i16_two_elts:
+; SSE: # %bb.0:
+; SSE-NEXT: movzwl (%rdi), %eax
+; SSE-NEXT: pinsrw $0, %eax, %xmm0
+; SSE-NEXT: pinsrw $1, %eax, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: insert_i16_two_elts:
+; AVX: # %bb.0:
+; AVX-NEXT: movzwl (%rdi), %eax
+; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
+; AVX-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %s = load i16, i16* %s.addr
+ %i0 = insertelement <8 x i16> %x, i16 %s, i32 0
+ %i1 = insertelement <8 x i16> %i0, i16 %s, i32 1
+ ret <8 x i16> %i1
+}
+
+define <4 x i32> @insert_i32_two_elts(<4 x i32> %x, i32* %s.addr) {
+; SSE2-LABEL: insert_i32_two_elts:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movl (%rdi), %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: insert_i32_two_elts:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movl (%rdi), %eax
+; SSE41-NEXT: pinsrd $0, %eax, %xmm0
+; SSE41-NEXT: pinsrd $1, %eax, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: insert_i32_two_elts:
+; AVX: # %bb.0:
+; AVX-NEXT: movl (%rdi), %eax
+; AVX-NEXT: vpinsrd $0, %eax, %xmm0, %xmm0
+; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %s = load i32, i32* %s.addr
+ %i0 = insertelement <4 x i32> %x, i32 %s, i32 0
+ %i1 = insertelement <4 x i32> %i0, i32 %s, i32 1
+ ret <4 x i32> %i1
+}
+
+define <2 x i64> @insert_i64_two_elts(<2 x i64> %x, i64* %s.addr) {
+; SSE2-LABEL: insert_i64_two_elts:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movq (%rdi), %rax
+; SSE2-NEXT: movq %rax, %xmm0
+; SSE2-NEXT: movq %rax, %xmm1
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: insert_i64_two_elts:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movq (%rdi), %rax
+; SSE41-NEXT: pinsrq $0, %rax, %xmm0
+; SSE41-NEXT: pinsrq $1, %rax, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: insert_i64_two_elts:
+; AVX: # %bb.0:
+; AVX-NEXT: movq (%rdi), %rax
+; AVX-NEXT: vpinsrq $0, %rax, %xmm0, %xmm0
+; AVX-NEXT: vpinsrq $1, %rax, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %s = load i64, i64* %s.addr
+ %i0 = insertelement <2 x i64> %x, i64 %s, i32 0
+ %i1 = insertelement <2 x i64> %i0, i64 %s, i32 1
+ ret <2 x i64> %i1
+}
+
+; Special tests
+
+define void @insert_i32_two_elts_into_different_vectors(<4 x i32> %x, <4 x i32> %y, i32* %s.addr, <4 x i32>* %x.out.addr, <4 x i32>* %y.out.addr) {
+; SSE2-LABEL: insert_i32_two_elts_into_different_vectors:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movl (%rdi), %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm1[2,3]
+; SSE2-NEXT: movaps %xmm0, (%rsi)
+; SSE2-NEXT: movaps %xmm2, (%rdx)
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: insert_i32_two_elts_into_different_vectors:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movl (%rdi), %eax
+; SSE41-NEXT: pinsrd $0, %eax, %xmm0
+; SSE41-NEXT: pinsrd $1, %eax, %xmm1
+; SSE41-NEXT: movdqa %xmm0, (%rsi)
+; SSE41-NEXT: movdqa %xmm1, (%rdx)
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: insert_i32_two_elts_into_different_vectors:
+; AVX: # %bb.0:
+; AVX-NEXT: movl (%rdi), %eax
+; AVX-NEXT: vpinsrd $0, %eax, %xmm0, %xmm0
+; AVX-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
+; AVX-NEXT: vmovdqa %xmm0, (%rsi)
+; AVX-NEXT: vmovdqa %xmm1, (%rdx)
+; AVX-NEXT: retq
+ %s = load i32, i32* %s.addr
+ %i0 = insertelement <4 x i32> %x, i32 %s, i32 0
+ %i1 = insertelement <4 x i32> %y, i32 %s, i32 1
+ store <4 x i32> %i0, <4 x i32>* %x.out.addr
+ store <4 x i32> %i1, <4 x i32>* %y.out.addr
+ ret void
+}
+
+define <4 x float> @insert_f32_two_elts_extrause_of_scalar(<4 x float> %x, float* %s.addr, float* %s.out) {
+; SSE-LABEL: insert_f32_two_elts_extrause_of_scalar:
+; SSE: # %bb.0:
+; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT: movss %xmm1, (%rsi)
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,3]
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: insert_f32_two_elts_extrause_of_scalar:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss %xmm1, (%rsi)
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,0],xmm0[2,3]
+; AVX-NEXT: retq
+ %s = load float, float* %s.addr
+ store float %s, float* %s.out
+ %i0 = insertelement <4 x float> %x, float %s, i32 0
+ %i1 = insertelement <4 x float> %i0, float %s, i32 1
+ ret <4 x float> %i1
+}
diff --git a/llvm/test/CodeGen/X86/sse-insertelt.ll b/llvm/test/CodeGen/X86/sse-insertelt.ll
new file mode 100644
index 000000000000..89253c562a34
--- /dev/null
+++ b/llvm/test/CodeGen/X86/sse-insertelt.ll
@@ -0,0 +1,376 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX
+
+; 0'th element insertion into an SSE register.
+
+define <4 x float> @insert_f32_firstelt(<4 x float> %x, float %s) {
+; SSE2-LABEL: insert_f32_firstelt:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: insert_f32_firstelt:
+; SSE41: # %bb.0:
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: insert_f32_firstelt:
+; AVX: # %bb.0:
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; AVX-NEXT: retq
+ %i0 = insertelement <4 x float> %x, float %s, i32 0
+ ret <4 x float> %i0
+}
+
+define <2 x double> @insert_f64_firstelt(<2 x double> %x, double %s) {
+; SSE2-LABEL: insert_f64_firstelt:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: insert_f64_firstelt:
+; SSE41: # %bb.0:
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: insert_f64_firstelt:
+; AVX: # %bb.0:
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX-NEXT: retq
+ %i0 = insertelement <2 x double> %x, double %s, i32 0
+ ret <2 x double> %i0
+}
+
+define <16 x i8> @insert_i8_firstelt(<16 x i8> %x, i8 %s) {
+; SSE2-LABEL: insert_i8_firstelt:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: movd %edi, %xmm2
+; SSE2-NEXT: pandn %xmm2, %xmm1
+; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: insert_i8_firstelt:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pinsrb $0, %edi, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: insert_i8_firstelt:
+; AVX: # %bb.0:
+; AVX-NEXT: vpinsrb $0, %edi, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %i0 = insertelement <16 x i8> %x, i8 %s, i32 0
+ ret <16 x i8> %i0
+}
+
+define <8 x i16> @insert_i16_firstelt(<8 x i16> %x, i16 %s) {
+; SSE-LABEL: insert_i16_firstelt:
+; SSE: # %bb.0:
+; SSE-NEXT: pinsrw $0, %edi, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: insert_i16_firstelt:
+; AVX: # %bb.0:
+; AVX-NEXT: vpinsrw $0, %edi, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %i0 = insertelement <8 x i16> %x, i16 %s, i32 0
+ ret <8 x i16> %i0
+}
+
+define <4 x i32> @insert_i32_firstelt(<4 x i32> %x, i32 %s) {
+; SSE2-LABEL: insert_i32_firstelt:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movd %edi, %xmm1
+; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: insert_i32_firstelt:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pinsrd $0, %edi, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: insert_i32_firstelt:
+; AVX: # %bb.0:
+; AVX-NEXT: vpinsrd $0, %edi, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %i0 = insertelement <4 x i32> %x, i32 %s, i32 0
+ ret <4 x i32> %i0
+}
+
+define <2 x i64> @insert_i64_firstelt(<2 x i64> %x, i64 %s) {
+; SSE2-LABEL: insert_i64_firstelt:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movq %rdi, %xmm1
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: insert_i64_firstelt:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pinsrq $0, %rdi, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: insert_i64_firstelt:
+; AVX: # %bb.0:
+; AVX-NEXT: vpinsrq $0, %rdi, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %i0 = insertelement <2 x i64> %x, i64 %s, i32 0
+ ret <2 x i64> %i0
+}
+
+; 1'th element insertion.
+
+define <4 x float> @insert_f32_secondelt(<4 x float> %x, float %s) {
+; SSE2-LABEL: insert_f32_secondelt:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: insert_f32_secondelt:
+; SSE41: # %bb.0:
+; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: insert_f32_secondelt:
+; AVX: # %bb.0:
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; AVX-NEXT: retq
+ %i0 = insertelement <4 x float> %x, float %s, i32 1
+ ret <4 x float> %i0
+}
+
+define <2 x double> @insert_f64_secondelt(<2 x double> %x, double %s) {
+; SSE-LABEL: insert_f64_secondelt:
+; SSE: # %bb.0:
+; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: insert_f64_secondelt:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT: retq
+ %i0 = insertelement <2 x double> %x, double %s, i32 1
+ ret <2 x double> %i0
+}
+
+define <16 x i8> @insert_i8_secondelt(<16 x i8> %x, i8 %s) {
+; SSE2-LABEL: insert_i8_secondelt:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: movd %edi, %xmm2
+; SSE2-NEXT: psllw $8, %xmm2
+; SSE2-NEXT: pandn %xmm2, %xmm1
+; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: insert_i8_secondelt:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pinsrb $1, %edi, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: insert_i8_secondelt:
+; AVX: # %bb.0:
+; AVX-NEXT: vpinsrb $1, %edi, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %i0 = insertelement <16 x i8> %x, i8 %s, i32 1
+ ret <16 x i8> %i0
+}
+
+define <8 x i16> @insert_i16_secondelt(<8 x i16> %x, i16 %s) {
+; SSE-LABEL: insert_i16_secondelt:
+; SSE: # %bb.0:
+; SSE-NEXT: pinsrw $1, %edi, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: insert_i16_secondelt:
+; AVX: # %bb.0:
+; AVX-NEXT: vpinsrw $1, %edi, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %i0 = insertelement <8 x i16> %x, i16 %s, i32 1
+ ret <8 x i16> %i0
+}
+
+define <4 x i32> @insert_i32_secondelt(<4 x i32> %x, i32 %s) {
+; SSE2-LABEL: insert_i32_secondelt:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movd %edi, %xmm1
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: insert_i32_secondelt:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pinsrd $1, %edi, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: insert_i32_secondelt:
+; AVX: # %bb.0:
+; AVX-NEXT: vpinsrd $1, %edi, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %i0 = insertelement <4 x i32> %x, i32 %s, i32 1
+ ret <4 x i32> %i0
+}
+
+define <2 x i64> @insert_i64_secondelt(<2 x i64> %x, i64 %s) {
+; SSE2-LABEL: insert_i64_secondelt:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movq %rdi, %xmm1
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: insert_i64_secondelt:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pinsrq $1, %rdi, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: insert_i64_secondelt:
+; AVX: # %bb.0:
+; AVX-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %i0 = insertelement <2 x i64> %x, i64 %s, i32 1
+ ret <2 x i64> %i0
+}
+
+; element insertion into two elements
+
+define <4 x float> @insert_f32_two_elts(<4 x float> %x, float %s) {
+; SSE-LABEL: insert_f32_two_elts:
+; SSE: # %bb.0:
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,3]
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: insert_f32_two_elts:
+; AVX: # %bb.0:
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,0],xmm0[2,3]
+; AVX-NEXT: retq
+ %i0 = insertelement <4 x float> %x, float %s, i32 0
+ %i1 = insertelement <4 x float> %i0, float %s, i32 1
+ ret <4 x float> %i1
+}
+
+define <2 x double> @insert_f64_two_elts(<2 x double> %x, double %s) {
+; SSE2-LABEL: insert_f64_two_elts:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: insert_f64_two_elts:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm1[0,0]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: insert_f64_two_elts:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm1[0,0]
+; AVX-NEXT: retq
+ %i0 = insertelement <2 x double> %x, double %s, i32 0
+ %i1 = insertelement <2 x double> %i0, double %s, i32 1
+ ret <2 x double> %i1
+}
+
+define <16 x i8> @insert_i8_two_elts(<16 x i8> %x, i8 %s) {
+; SSE2-LABEL: insert_i8_two_elts:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: movd %edi, %xmm2
+; SSE2-NEXT: pandn %xmm2, %xmm1
+; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: psllw $8, %xmm2
+; SSE2-NEXT: pandn %xmm2, %xmm1
+; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: insert_i8_two_elts:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pinsrb $0, %edi, %xmm0
+; SSE41-NEXT: pinsrb $1, %edi, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: insert_i8_two_elts:
+; AVX: # %bb.0:
+; AVX-NEXT: vpinsrb $0, %edi, %xmm0, %xmm0
+; AVX-NEXT: vpinsrb $1, %edi, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %i0 = insertelement <16 x i8> %x, i8 %s, i32 0
+ %i1 = insertelement <16 x i8> %i0, i8 %s, i32 1
+ ret <16 x i8> %i1
+}
+
+define <8 x i16> @insert_i16_two_elts(<8 x i16> %x, i16 %s) {
+; SSE-LABEL: insert_i16_two_elts:
+; SSE: # %bb.0:
+; SSE-NEXT: pinsrw $0, %edi, %xmm0
+; SSE-NEXT: pinsrw $1, %edi, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: insert_i16_two_elts:
+; AVX: # %bb.0:
+; AVX-NEXT: vpinsrw $0, %edi, %xmm0, %xmm0
+; AVX-NEXT: vpinsrw $1, %edi, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %i0 = insertelement <8 x i16> %x, i16 %s, i32 0
+ %i1 = insertelement <8 x i16> %i0, i16 %s, i32 1
+ ret <8 x i16> %i1
+}
+
+define <4 x i32> @insert_i32_two_elts(<4 x i32> %x, i32 %s) {
+; SSE2-LABEL: insert_i32_two_elts:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movd %edi, %xmm2
+; SSE2-NEXT: movd %edi, %xmm1
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: insert_i32_two_elts:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pinsrd $0, %edi, %xmm0
+; SSE41-NEXT: pinsrd $1, %edi, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: insert_i32_two_elts:
+; AVX: # %bb.0:
+; AVX-NEXT: vpinsrd $0, %edi, %xmm0, %xmm0
+; AVX-NEXT: vpinsrd $1, %edi, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %i0 = insertelement <4 x i32> %x, i32 %s, i32 0
+ %i1 = insertelement <4 x i32> %i0, i32 %s, i32 1
+ ret <4 x i32> %i1
+}
+
+define <2 x i64> @insert_i64_two_elts(<2 x i64> %x, i64 %s) {
+; SSE2-LABEL: insert_i64_two_elts:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movq %rdi, %xmm0
+; SSE2-NEXT: movq %rdi, %xmm1
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: insert_i64_two_elts:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pinsrq $0, %rdi, %xmm0
+; SSE41-NEXT: pinsrq $1, %rdi, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: insert_i64_two_elts:
+; AVX: # %bb.0:
+; AVX-NEXT: vpinsrq $0, %rdi, %xmm0, %xmm0
+; AVX-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %i0 = insertelement <2 x i64> %x, i64 %s, i32 0
+ %i1 = insertelement <2 x i64> %i0, i64 %s, i32 1
+ ret <2 x i64> %i1
+}
More information about the llvm-commits
mailing list