[llvm] r284305 - [AVX-512] Add shuffle comments for vbroadcast instructions.
From: Craig Topper via llvm-commits <llvm-commits at lists.llvm.org>
Date: Sat Oct 15 09:26:07 PDT 2016
Author: ctopper
Date: Sat Oct 15 11:26:07 2016
New Revision: 284305
URL: http://llvm.org/viewvc/llvm-project?rev=284305&view=rev
Log:
[AVX-512] Add shuffle comments for vbroadcast instructions.
Modified:
llvm/trunk/lib/Target/X86/InstPrinter/X86InstComments.cpp
llvm/trunk/test/CodeGen/X86/avx512-vbroadcasti128.ll
llvm/trunk/test/CodeGen/X86/avx512dq-intrinsics.ll
llvm/trunk/test/CodeGen/X86/avx512dqvl-intrinsics.ll
llvm/trunk/test/CodeGen/X86/subvector-broadcast.ll
llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll
Modified: llvm/trunk/lib/Target/X86/InstPrinter/X86InstComments.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/InstPrinter/X86InstComments.cpp?rev=284305&r1=284304&r2=284305&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/InstPrinter/X86InstComments.cpp (original)
+++ llvm/trunk/lib/Target/X86/InstPrinter/X86InstComments.cpp Sat Oct 15 11:26:07 2016
@@ -979,9 +979,52 @@ bool llvm::EmitAnyX86InstComments(const
case X86::VBROADCASTF128:
case X86::VBROADCASTI128:
+ CASE_AVX512_INS_COMMON(BROADCASTF64X2, Z128, rm)
+ CASE_AVX512_INS_COMMON(BROADCASTI64X2, Z128, rm)
DecodeSubVectorBroadcast(MVT::v4f64, MVT::v2f64, ShuffleMask);
DestName = getRegName(MI->getOperand(0).getReg());
break;
+ CASE_AVX512_INS_COMMON(BROADCASTF64X2, , rm)
+ CASE_AVX512_INS_COMMON(BROADCASTI64X2, , rm)
+ DecodeSubVectorBroadcast(MVT::v8f64, MVT::v2f64, ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ CASE_AVX512_INS_COMMON(BROADCASTF64X4, , rm)
+ CASE_AVX512_INS_COMMON(BROADCASTI64X4, , rm)
+ DecodeSubVectorBroadcast(MVT::v8f64, MVT::v4f64, ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ CASE_AVX512_INS_COMMON(BROADCASTF32X4, Z256, rm)
+ CASE_AVX512_INS_COMMON(BROADCASTI32X4, Z256, rm)
+ DecodeSubVectorBroadcast(MVT::v8f32, MVT::v4f32, ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ CASE_AVX512_INS_COMMON(BROADCASTF32X4, , rm)
+ CASE_AVX512_INS_COMMON(BROADCASTI32X4, , rm)
+ DecodeSubVectorBroadcast(MVT::v16f32, MVT::v4f32, ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ CASE_AVX512_INS_COMMON(BROADCASTF32X8, , rm)
+ CASE_AVX512_INS_COMMON(BROADCASTI32X8, , rm)
+ DecodeSubVectorBroadcast(MVT::v16f32, MVT::v8f32, ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ CASE_AVX512_INS_COMMON(BROADCASTF32X2, Z256, r)
+ CASE_AVX512_INS_COMMON(BROADCASTI32X2, Z256, r)
+ Src1Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ CASE_AVX512_INS_COMMON(BROADCASTF32X2, Z256, m)
+ CASE_AVX512_INS_COMMON(BROADCASTI32X2, Z256, m)
+ DecodeSubVectorBroadcast(MVT::v8f32, MVT::v2f32, ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ CASE_AVX512_INS_COMMON(BROADCASTF32X2, Z, r)
+ CASE_AVX512_INS_COMMON(BROADCASTI32X2, Z, r)
+ Src1Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ CASE_AVX512_INS_COMMON(BROADCASTF32X2, Z, m)
+ CASE_AVX512_INS_COMMON(BROADCASTI32X2, Z, m)
+ DecodeSubVectorBroadcast(MVT::v16f32, MVT::v2f32, ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
CASE_PMOVZX(PMOVZXBW, r)
CASE_PMOVZX(PMOVZXBD, r)
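
For context, the new CASE_AVX512_INS_COMMON cases above only select the destination and source vector types; the repeated-subvector mask itself comes from DecodeSubVectorBroadcast. A minimal sketch of what that decode computes, inferred from the call sites in this patch rather than copied from the tree (it assumes LLVM's MVT and SmallVectorImpl types):

  #include "llvm/ADT/SmallVector.h"
  #include "llvm/CodeGen/MachineValueType.h"
  using namespace llvm;

  // Sketch only: repeat the source subvector's element indices until the
  // destination vector is filled, e.g. v8f32 from v2f32 -> 0,1,0,1,0,1,0,1,
  // which the comment printer renders as "[0,1,0,1,0,1,0,1]".
  void DecodeSubVectorBroadcast(MVT DstVT, MVT SrcVT,
                                SmallVectorImpl<int> &ShuffleMask) {
    unsigned NumDstElts = DstVT.getVectorNumElements();
    unsigned NumSrcElts = SrcVT.getVectorNumElements();
    for (unsigned i = 0, e = NumDstElts / NumSrcElts; i != e; ++i)
      for (unsigned j = 0; j != NumSrcElts; ++j)
        ShuffleMask.push_back(j);
  }

Note how each new case only differs in the (DstVT, SrcVT) pair it passes in; the register forms additionally set Src1Name before falling through to the memory forms so that the comment shows xmm0[...] instead of mem[...].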
Modified: llvm/trunk/test/CodeGen/X86/avx512-vbroadcasti128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-vbroadcasti128.ll?rev=284305&r1=284304&r2=284305&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-vbroadcasti128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-vbroadcasti128.ll Sat Oct 15 11:26:07 2016
@@ -10,19 +10,19 @@
define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_2f64_4f64:
; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
+; X64-AVX512VL-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512VL-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_2f64_4f64:
; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
+; X64-AVX512BWVL-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BWVL-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_2f64_4f64:
; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vbroadcastf64x2 (%rdi), %ymm0
+; X64-AVX512DQVL-NEXT: vbroadcastf64x2 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512DQVL-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512DQVL-NEXT: retq
%1 = load <2 x double>, <2 x double> *%p
@@ -34,19 +34,19 @@ define <4 x double> @test_broadcast_2f64
define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_2i64_4i64:
; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512VL-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512VL-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_2i64_4i64:
; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512BWVL-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BWVL-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_2i64_4i64:
; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vbroadcasti64x2 (%rdi), %ymm0
+; X64-AVX512DQVL-NEXT: vbroadcasti64x2 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512DQVL-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512DQVL-NEXT: retq
%1 = load <2 x i64>, <2 x i64> *%p
@@ -58,7 +58,7 @@ define <4 x i64> @test_broadcast_2i64_4i
define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_4f32_8f32:
; X64-AVX512: ## BB#0:
-; X64-AVX512-NEXT: vbroadcastf32x4 (%rdi), %ymm0
+; X64-AVX512-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: vaddps {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512-NEXT: retq
%1 = load <4 x float>, <4 x float> *%p
@@ -70,7 +70,7 @@ define <8 x float> @test_broadcast_4f32_
define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_4i32_8i32:
; X64-AVX512: ## BB#0:
-; X64-AVX512-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512-NEXT: retq
%1 = load <4 x i32>, <4 x i32> *%p
@@ -82,7 +82,7 @@ define <8 x i32> @test_broadcast_4i32_8i
define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_8i16_16i16:
; X64-AVX512: ## BB#0:
-; X64-AVX512-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512-NEXT: retq
%1 = load <8 x i16>, <8 x i16> *%p
@@ -94,7 +94,7 @@ define <16 x i16> @test_broadcast_8i16_1
define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_16i8_32i8:
; X64-AVX512: ## BB#0:
-; X64-AVX512-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512-NEXT: retq
%1 = load <16 x i8>, <16 x i8> *%p
@@ -110,21 +110,21 @@ define <32 x i8> @test_broadcast_16i8_32
define <8 x double> @test_broadcast_2f64_8f64(<2 x double> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_2f64_8f64:
; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
+; X64-AVX512VL-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512VL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512VL-NEXT: vaddpd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_2f64_8f64:
; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
+; X64-AVX512BWVL-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BWVL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: vaddpd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_2f64_8f64:
; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vbroadcastf64x2 (%rdi), %ymm0
+; X64-AVX512DQVL-NEXT: vbroadcastf64x2 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512DQVL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: vaddpd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: retq
@@ -137,21 +137,21 @@ define <8 x double> @test_broadcast_2f64
define <8 x i64> @test_broadcast_2i64_8i64(<2 x i64> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_2i64_8i64:
; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512VL-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512VL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512VL-NEXT: vpaddq {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_2i64_8i64:
; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512BWVL-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BWVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: vpaddq {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_2i64_8i64:
; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vbroadcasti64x2 (%rdi), %ymm0
+; X64-AVX512DQVL-NEXT: vbroadcasti64x2 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512DQVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: vpaddq {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: retq
@@ -164,21 +164,21 @@ define <8 x i64> @test_broadcast_2i64_8i
define <16 x float> @test_broadcast_4f32_16f32(<4 x float> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_4f32_16f32:
; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
+; X64-AVX512VL-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512VL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512VL-NEXT: vaddps {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_4f32_16f32:
; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
+; X64-AVX512BWVL-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BWVL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: vaddps {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_4f32_16f32:
; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
+; X64-AVX512DQVL-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512DQVL-NEXT: vinsertf32x8 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: vaddps {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: retq
@@ -191,21 +191,21 @@ define <16 x float> @test_broadcast_4f32
define <16 x i32> @test_broadcast_4i32_16i32(<4 x i32> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_4i32_16i32:
; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512VL-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512VL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_4i32_16i32:
; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512BWVL-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BWVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: vpaddd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_4i32_16i32:
; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512DQVL-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512DQVL-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: vpaddd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: retq
@@ -218,21 +218,21 @@ define <16 x i32> @test_broadcast_4i32_1
define <32 x i16> @test_broadcast_8i16_32i16(<8 x i16> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_8i16_32i16:
; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm1
+; X64-AVX512VL-NEXT: vbroadcasti32x4 {{.*#+}} ymm1 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512VL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm0
; X64-AVX512VL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm1
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_8i16_32i16:
; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512BWVL-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BWVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: vpaddw {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_8i16_32i16:
; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vbroadcasti32x4 (%rdi), %ymm1
+; X64-AVX512DQVL-NEXT: vbroadcasti32x4 {{.*#+}} ymm1 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512DQVL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm0
; X64-AVX512DQVL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm1
; X64-AVX512DQVL-NEXT: retq
@@ -245,21 +245,21 @@ define <32 x i16> @test_broadcast_8i16_3
define <64 x i8> @test_broadcast_16i8_64i8(<16 x i8> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_16i8_64i8:
; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm1
+; X64-AVX512VL-NEXT: vbroadcasti32x4 {{.*#+}} ymm1 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512VL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm0
; X64-AVX512VL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm1
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_16i8_64i8:
; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512BWVL-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BWVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: vpaddb {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_16i8_64i8:
; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vbroadcasti32x4 (%rdi), %ymm1
+; X64-AVX512DQVL-NEXT: vbroadcasti32x4 {{.*#+}} ymm1 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512DQVL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm0
; X64-AVX512DQVL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm1
; X64-AVX512DQVL-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/avx512dq-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512dq-intrinsics.ll?rev=284305&r1=284304&r2=284305&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512dq-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512dq-intrinsics.ll Sat Oct 15 11:26:07 2016
@@ -532,9 +532,9 @@ define <16 x float>@test_int_x86_avx512_
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x2_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vbroadcastf32x2 %xmm0, %zmm1 {%k1}
-; CHECK-NEXT: vbroadcastf32x2 %xmm0, %zmm2 {%k1} {z}
-; CHECK-NEXT: vbroadcastf32x2 %xmm0, %zmm0
+; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm1 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
+; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm2 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
+; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; CHECK-NEXT: vaddps %zmm2, %zmm1, %zmm1
; CHECK-NEXT: vaddps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -552,9 +552,9 @@ define <16 x i32>@test_int_x86_avx512_ma
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x2_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vbroadcasti32x2 %xmm0, %zmm1 {%k1}
-; CHECK-NEXT: vbroadcasti32x2 %xmm0, %zmm2 {%k1} {z}
-; CHECK-NEXT: vbroadcasti32x2 %xmm0, %zmm0
+; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm1 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
+; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm2 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
+; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; CHECK-NEXT: vpaddd %zmm2, %zmm1, %zmm1
; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
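
The zmm comments added in these CHECK lines follow directly from the decoded mask: a 32x2 broadcast into a 512-bit destination repeats two 32-bit elements eight times. As a rough illustration (the helper below is hypothetical, not the printer's actual code), the comment text can be reproduced like so:

  #include <cstdio>
  #include <string>

  // Hypothetical helper: format a broadcast mask the way the CHECK lines
  // above show it, e.g. "zmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]".
  static std::string formatBroadcastComment(const std::string &Dst,
                                            const std::string &Src,
                                            unsigned DstElts,
                                            unsigned SrcElts) {
    std::string S = Dst + " = " + Src + "[";
    for (unsigned i = 0; i != DstElts; ++i) {
      if (i)
        S += ",";
      S += std::to_string(i % SrcElts);
    }
    return S + "]";
  }

  int main() {
    // v16f32 destination broadcast from a v2f32 register source (vbroadcastf32x2).
    std::printf("%s\n", formatBroadcastComment("zmm0", "xmm0", 16, 2).c_str());
    // v8f64 destination broadcast from a v2f64 memory source (vbroadcastf64x2).
    std::printf("%s\n", formatBroadcastComment("zmm0", "mem", 8, 2).c_str());
  }

Running this prints the xmm-source and memory-source variants that appear in the 32x2 and 64x2 tests in this patch.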
Modified: llvm/trunk/test/CodeGen/X86/avx512dqvl-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512dqvl-intrinsics.ll?rev=284305&r1=284304&r2=284305&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512dqvl-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512dqvl-intrinsics.ll Sat Oct 15 11:26:07 2016
@@ -652,8 +652,11 @@ define <8 x float>@test_int_x86_avx512_m
; CHECK: ## BB#0:
; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
; CHECK-NEXT: vbroadcastf32x2 %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x19,0xc8]
+; CHECK-NEXT: ## ymm1 = xmm0[0,1,0,1,0,1,0,1]
; CHECK-NEXT: vbroadcastf32x2 %xmm0, %ymm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x19,0xd0]
+; CHECK-NEXT: ## ymm2 = xmm0[0,1,0,1,0,1,0,1]
; CHECK-NEXT: vbroadcastf32x2 %xmm0, %ymm0 ## encoding: [0x62,0xf2,0x7d,0x28,0x19,0xc0]
+; CHECK-NEXT: ## ymm0 = xmm0[0,1,0,1,0,1,0,1]
; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm1 ## encoding: [0x62,0xf1,0x74,0x28,0x58,0xca]
; CHECK-NEXT: vaddps %ymm0, %ymm1, %ymm0 ## encoding: [0x62,0xf1,0x74,0x28,0x58,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -672,8 +675,11 @@ define <8 x i32>@test_int_x86_avx512_mas
; CHECK: ## BB#0:
; CHECK-NEXT: kmovb %edi, %k1 ## encoding: [0xc5,0xf9,0x92,0xcf]
; CHECK-NEXT: vbroadcasti32x2 (%rsi), %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x59,0x0e]
+; CHECK-NEXT: ## ymm1 = mem[0,1,0,1,0,1,0,1]
; CHECK-NEXT: vbroadcasti32x2 %xmm0, %ymm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x59,0xd0]
+; CHECK-NEXT: ## ymm2 = xmm0[0,1,0,1,0,1,0,1]
; CHECK-NEXT: vbroadcasti32x2 %xmm0, %ymm0 ## encoding: [0x62,0xf2,0x7d,0x28,0x59,0xc0]
+; CHECK-NEXT: ## ymm0 = xmm0[0,1,0,1,0,1,0,1]
; CHECK-NEXT: vpaddd %ymm0, %ymm2, %ymm0 ## encoding: [0x62,0xf1,0x6d,0x28,0xfe,0xc0]
; CHECK-NEXT: vpaddd %ymm0, %ymm1, %ymm0 ## encoding: [0x62,0xf1,0x75,0x28,0xfe,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
Modified: llvm/trunk/test/CodeGen/X86/subvector-broadcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/subvector-broadcast.ll?rev=284305&r1=284304&r2=284305&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/subvector-broadcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/subvector-broadcast.ll Sat Oct 15 11:26:07 2016
@@ -24,19 +24,19 @@ define <4 x double> @test_broadcast_2f64
; X32-AVX512F-LABEL: test_broadcast_2f64_4f64:
; X32-AVX512F: ## BB#0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT: vbroadcastf32x4 (%eax), %ymm0
+; X32-AVX512F-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: test_broadcast_2f64_4f64:
; X32-AVX512BW: ## BB#0:
; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512BW-NEXT: vbroadcastf32x4 (%eax), %ymm0
+; X32-AVX512BW-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: test_broadcast_2f64_4f64:
; X32-AVX512DQ: ## BB#0:
; X32-AVX512DQ-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512DQ-NEXT: vbroadcastf64x2 (%eax), %ymm0
+; X32-AVX512DQ-NEXT: vbroadcastf64x2 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX512DQ-NEXT: retl
;
; X64-AVX-LABEL: test_broadcast_2f64_4f64:
@@ -46,17 +46,17 @@ define <4 x double> @test_broadcast_2f64
;
; X64-AVX512F-LABEL: test_broadcast_2f64_4f64:
; X64-AVX512F: ## BB#0:
-; X64-AVX512F-NEXT: vbroadcastf32x4 (%rdi), %ymm0
+; X64-AVX512F-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: test_broadcast_2f64_4f64:
; X64-AVX512BW: ## BB#0:
-; X64-AVX512BW-NEXT: vbroadcastf32x4 (%rdi), %ymm0
+; X64-AVX512BW-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: test_broadcast_2f64_4f64:
; X64-AVX512DQ: ## BB#0:
-; X64-AVX512DQ-NEXT: vbroadcastf64x2 (%rdi), %ymm0
+; X64-AVX512DQ-NEXT: vbroadcastf64x2 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512DQ-NEXT: retq
%1 = load <2 x double>, <2 x double> *%p
%2 = shufflevector <2 x double> %1, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -74,21 +74,21 @@ define <8 x double> @test_broadcast_2f64
; X32-AVX512F-LABEL: test_broadcast_2f64_8f64:
; X32-AVX512F: ## BB#0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT: vbroadcastf32x4 (%eax), %ymm0
+; X32-AVX512F-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: test_broadcast_2f64_8f64:
; X32-AVX512BW: ## BB#0:
; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512BW-NEXT: vbroadcastf32x4 (%eax), %ymm0
+; X32-AVX512BW-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: test_broadcast_2f64_8f64:
; X32-AVX512DQ: ## BB#0:
; X32-AVX512DQ-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512DQ-NEXT: vbroadcastf64x2 (%eax), %ymm0
+; X32-AVX512DQ-NEXT: vbroadcastf64x2 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX512DQ-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512DQ-NEXT: retl
;
@@ -100,19 +100,19 @@ define <8 x double> @test_broadcast_2f64
;
; X64-AVX512F-LABEL: test_broadcast_2f64_8f64:
; X64-AVX512F: ## BB#0:
-; X64-AVX512F-NEXT: vbroadcastf32x4 (%rdi), %ymm0
+; X64-AVX512F-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: test_broadcast_2f64_8f64:
; X64-AVX512BW: ## BB#0:
-; X64-AVX512BW-NEXT: vbroadcastf32x4 (%rdi), %ymm0
+; X64-AVX512BW-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: test_broadcast_2f64_8f64:
; X64-AVX512DQ: ## BB#0:
-; X64-AVX512DQ-NEXT: vbroadcastf64x2 (%rdi), %ymm0
+; X64-AVX512DQ-NEXT: vbroadcastf64x2 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512DQ-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512DQ-NEXT: retq
%1 = load <2 x double>, <2 x double> *%p
@@ -161,19 +161,19 @@ define <4 x i64> @test_broadcast_2i64_4i
; X32-AVX512F-LABEL: test_broadcast_2i64_4i64:
; X32-AVX512F: ## BB#0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT: vbroadcasti32x4 (%eax), %ymm0
+; X32-AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: test_broadcast_2i64_4i64:
; X32-AVX512BW: ## BB#0:
; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512BW-NEXT: vbroadcasti32x4 (%eax), %ymm0
+; X32-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: test_broadcast_2i64_4i64:
; X32-AVX512DQ: ## BB#0:
; X32-AVX512DQ-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512DQ-NEXT: vbroadcasti64x2 (%eax), %ymm0
+; X32-AVX512DQ-NEXT: vbroadcasti64x2 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX512DQ-NEXT: retl
;
; X64-AVX-LABEL: test_broadcast_2i64_4i64:
@@ -183,17 +183,17 @@ define <4 x i64> @test_broadcast_2i64_4i
;
; X64-AVX512F-LABEL: test_broadcast_2i64_4i64:
; X64-AVX512F: ## BB#0:
-; X64-AVX512F-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: test_broadcast_2i64_4i64:
; X64-AVX512BW: ## BB#0:
-; X64-AVX512BW-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: test_broadcast_2i64_4i64:
; X64-AVX512DQ: ## BB#0:
-; X64-AVX512DQ-NEXT: vbroadcasti64x2 (%rdi), %ymm0
+; X64-AVX512DQ-NEXT: vbroadcasti64x2 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512DQ-NEXT: retq
%1 = load <2 x i64>, <2 x i64> *%p
%2 = shufflevector <2 x i64> %1, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -218,21 +218,21 @@ define <8 x i64> @test_broadcast_2i64_8i
; X32-AVX512F-LABEL: test_broadcast_2i64_8i64:
; X32-AVX512F: ## BB#0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT: vbroadcasti32x4 (%eax), %ymm0
+; X32-AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: test_broadcast_2i64_8i64:
; X32-AVX512BW: ## BB#0:
; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512BW-NEXT: vbroadcasti32x4 (%eax), %ymm0
+; X32-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: test_broadcast_2i64_8i64:
; X32-AVX512DQ: ## BB#0:
; X32-AVX512DQ-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512DQ-NEXT: vbroadcasti64x2 (%eax), %ymm0
+; X32-AVX512DQ-NEXT: vbroadcasti64x2 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512DQ-NEXT: retl
;
@@ -250,19 +250,19 @@ define <8 x i64> @test_broadcast_2i64_8i
;
; X64-AVX512F-LABEL: test_broadcast_2i64_8i64:
; X64-AVX512F: ## BB#0:
-; X64-AVX512F-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: test_broadcast_2i64_8i64:
; X64-AVX512BW: ## BB#0:
-; X64-AVX512BW-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: test_broadcast_2i64_8i64:
; X64-AVX512DQ: ## BB#0:
-; X64-AVX512DQ-NEXT: vbroadcasti64x2 (%rdi), %ymm0
+; X64-AVX512DQ-NEXT: vbroadcasti64x2 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512DQ-NEXT: retq
%1 = load <2 x i64>, <2 x i64> *%p
@@ -311,7 +311,7 @@ define <8 x float> @test_broadcast_4f32_
; X32-AVX512-LABEL: test_broadcast_4f32_8f32:
; X32-AVX512: ## BB#0:
; X32-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512-NEXT: vbroadcastf32x4 (%eax), %ymm0
+; X32-AVX512-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: test_broadcast_4f32_8f32:
@@ -321,7 +321,7 @@ define <8 x float> @test_broadcast_4f32_
;
; X64-AVX512-LABEL: test_broadcast_4f32_8f32:
; X64-AVX512: ## BB#0:
-; X64-AVX512-NEXT: vbroadcastf32x4 (%rdi), %ymm0
+; X64-AVX512-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: retq
%1 = load <4 x float>, <4 x float> *%p
%2 = shufflevector <4 x float> %1, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -339,21 +339,21 @@ define <16 x float> @test_broadcast_4f32
; X32-AVX512F-LABEL: test_broadcast_4f32_16f32:
; X32-AVX512F: ## BB#0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT: vbroadcastf32x4 (%eax), %ymm0
+; X32-AVX512F-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: test_broadcast_4f32_16f32:
; X32-AVX512BW: ## BB#0:
; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512BW-NEXT: vbroadcastf32x4 (%eax), %ymm0
+; X32-AVX512BW-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: test_broadcast_4f32_16f32:
; X32-AVX512DQ: ## BB#0:
; X32-AVX512DQ-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512DQ-NEXT: vbroadcastf32x4 (%eax), %ymm0
+; X32-AVX512DQ-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512DQ-NEXT: vinsertf32x8 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512DQ-NEXT: retl
;
@@ -365,19 +365,19 @@ define <16 x float> @test_broadcast_4f32
;
; X64-AVX512F-LABEL: test_broadcast_4f32_16f32:
; X64-AVX512F: ## BB#0:
-; X64-AVX512F-NEXT: vbroadcastf32x4 (%rdi), %ymm0
+; X64-AVX512F-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: test_broadcast_4f32_16f32:
; X64-AVX512BW: ## BB#0:
-; X64-AVX512BW-NEXT: vbroadcastf32x4 (%rdi), %ymm0
+; X64-AVX512BW-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: test_broadcast_4f32_16f32:
; X64-AVX512DQ: ## BB#0:
-; X64-AVX512DQ-NEXT: vbroadcastf32x4 (%rdi), %ymm0
+; X64-AVX512DQ-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512DQ-NEXT: vinsertf32x8 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512DQ-NEXT: retq
%1 = load <4 x float>, <4 x float> *%p
@@ -452,7 +452,7 @@ define <8 x i32> @test_broadcast_4i32_8i
; X32-AVX512-LABEL: test_broadcast_4i32_8i32:
; X32-AVX512: ## BB#0:
; X32-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512-NEXT: vbroadcasti32x4 (%eax), %ymm0
+; X32-AVX512-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: test_broadcast_4i32_8i32:
@@ -462,7 +462,7 @@ define <8 x i32> @test_broadcast_4i32_8i
;
; X64-AVX512-LABEL: test_broadcast_4i32_8i32:
; X64-AVX512: ## BB#0:
-; X64-AVX512-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: retq
%1 = load <4 x i32>, <4 x i32> *%p
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -487,21 +487,21 @@ define <16 x i32> @test_broadcast_4i32_1
; X32-AVX512F-LABEL: test_broadcast_4i32_16i32:
; X32-AVX512F: ## BB#0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT: vbroadcasti32x4 (%eax), %ymm0
+; X32-AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: test_broadcast_4i32_16i32:
; X32-AVX512BW: ## BB#0:
; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512BW-NEXT: vbroadcasti32x4 (%eax), %ymm0
+; X32-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: test_broadcast_4i32_16i32:
; X32-AVX512DQ: ## BB#0:
; X32-AVX512DQ-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512DQ-NEXT: vbroadcasti32x4 (%eax), %ymm0
+; X32-AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512DQ-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512DQ-NEXT: retl
;
@@ -519,19 +519,19 @@ define <16 x i32> @test_broadcast_4i32_1
;
; X64-AVX512F-LABEL: test_broadcast_4i32_16i32:
; X64-AVX512F: ## BB#0:
-; X64-AVX512F-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: test_broadcast_4i32_16i32:
; X64-AVX512BW: ## BB#0:
-; X64-AVX512BW-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: test_broadcast_4i32_16i32:
; X64-AVX512DQ: ## BB#0:
-; X64-AVX512DQ-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512DQ-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512DQ-NEXT: retq
%1 = load <4 x i32>, <4 x i32> *%p
@@ -606,7 +606,7 @@ define <16 x i16> @test_broadcast_8i16_1
; X32-AVX512-LABEL: test_broadcast_8i16_16i16:
; X32-AVX512: ## BB#0:
; X32-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512-NEXT: vbroadcasti32x4 (%eax), %ymm0
+; X32-AVX512-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: test_broadcast_8i16_16i16:
@@ -616,7 +616,7 @@ define <16 x i16> @test_broadcast_8i16_1
;
; X64-AVX512-LABEL: test_broadcast_8i16_16i16:
; X64-AVX512: ## BB#0:
-; X64-AVX512-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: retq
%1 = load <8 x i16>, <8 x i16> *%p
%2 = shufflevector <8 x i16> %1, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -641,21 +641,21 @@ define <32 x i16> @test_broadcast_8i16_3
; X32-AVX512F-LABEL: test_broadcast_8i16_32i16:
; X32-AVX512F: ## BB#0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT: vbroadcasti32x4 (%eax), %ymm0
+; X32-AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512F-NEXT: vmovdqa64 %ymm0, %ymm1
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: test_broadcast_8i16_32i16:
; X32-AVX512BW: ## BB#0:
; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512BW-NEXT: vbroadcasti32x4 (%eax), %ymm0
+; X32-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: test_broadcast_8i16_32i16:
; X32-AVX512DQ: ## BB#0:
; X32-AVX512DQ-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512DQ-NEXT: vbroadcasti32x4 (%eax), %ymm0
+; X32-AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm1
; X32-AVX512DQ-NEXT: retl
;
@@ -673,19 +673,19 @@ define <32 x i16> @test_broadcast_8i16_3
;
; X64-AVX512F-LABEL: test_broadcast_8i16_32i16:
; X64-AVX512F: ## BB#0:
-; X64-AVX512F-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512F-NEXT: vmovdqa64 %ymm0, %ymm1
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: test_broadcast_8i16_32i16:
; X64-AVX512BW: ## BB#0:
-; X64-AVX512BW-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: test_broadcast_8i16_32i16:
; X64-AVX512DQ: ## BB#0:
-; X64-AVX512DQ-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm1
; X64-AVX512DQ-NEXT: retq
%1 = load <8 x i16>, <8 x i16> *%p
@@ -760,7 +760,7 @@ define <32 x i8> @test_broadcast_16i8_32
; X32-AVX512-LABEL: test_broadcast_16i8_32i8:
; X32-AVX512: ## BB#0:
; X32-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512-NEXT: vbroadcasti32x4 (%eax), %ymm0
+; X32-AVX512-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: test_broadcast_16i8_32i8:
@@ -770,7 +770,7 @@ define <32 x i8> @test_broadcast_16i8_32
;
; X64-AVX512-LABEL: test_broadcast_16i8_32i8:
; X64-AVX512: ## BB#0:
-; X64-AVX512-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512-NEXT: retq
%1 = load <16 x i8>, <16 x i8> *%p
%2 = shufflevector <16 x i8> %1, <16 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -795,21 +795,21 @@ define <64 x i8> @test_broadcast_16i8_64
; X32-AVX512F-LABEL: test_broadcast_16i8_64i8:
; X32-AVX512F: ## BB#0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT: vbroadcasti32x4 (%eax), %ymm0
+; X32-AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512F-NEXT: vmovdqa64 %ymm0, %ymm1
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: test_broadcast_16i8_64i8:
; X32-AVX512BW: ## BB#0:
; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512BW-NEXT: vbroadcasti32x4 (%eax), %ymm0
+; X32-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: test_broadcast_16i8_64i8:
; X32-AVX512DQ: ## BB#0:
; X32-AVX512DQ-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512DQ-NEXT: vbroadcasti32x4 (%eax), %ymm0
+; X32-AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X32-AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm1
; X32-AVX512DQ-NEXT: retl
;
@@ -827,19 +827,19 @@ define <64 x i8> @test_broadcast_16i8_64
;
; X64-AVX512F-LABEL: test_broadcast_16i8_64i8:
; X64-AVX512F: ## BB#0:
-; X64-AVX512F-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512F-NEXT: vmovdqa64 %ymm0, %ymm1
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: test_broadcast_16i8_64i8:
; X64-AVX512BW: ## BB#0:
-; X64-AVX512BW-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: test_broadcast_16i8_64i8:
; X64-AVX512DQ: ## BB#0:
-; X64-AVX512DQ-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; X64-AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm1
; X64-AVX512DQ-NEXT: retq
%1 = load <16 x i8>, <16 x i8> *%p
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll?rev=284305&r1=284304&r2=284305&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll Sat Oct 15 11:26:07 2016
@@ -1318,7 +1318,7 @@ define <4 x i64> @splat128_mem_v4i64_fro
;
; AVX512VL-LABEL: splat128_mem_v4i64_from_v2i64:
; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; AVX512VL-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; AVX512VL-NEXT: retq
%v = load <2 x i64>, <2 x i64>* %ptr
%shuffle = shufflevector <2 x i64> %v, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -1338,7 +1338,7 @@ define <4 x double> @splat128_mem_v4f64_
;
; AVX512VL-LABEL: splat128_mem_v4f64_from_v2f64:
; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
+; AVX512VL-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 = mem[0,1,2,3,0,1,2,3]
; AVX512VL-NEXT: retq
%v = load <2 x double>, <2 x double>* %ptr
%shuffle = shufflevector <2 x double> %v, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>