[llvm] r247149 - AVX512: Implemented encoding and intrinsics for

Igor Breger via llvm-commits llvm-commits at lists.llvm.org
Wed Sep 9 07:35:10 PDT 2015


Author: ibreger
Date: Wed Sep  9 09:35:09 2015
New Revision: 247149

URL: http://llvm.org/viewvc/llvm-project?rev=247149&view=rev
Log:
AVX512: Implemented encoding and intrinsics for
  vextracti64x4, vextracti64x2, vextracti32x8, vextracti32x4, vextractf64x4, vextractf64x2, vextractf32x8, vextractf32x4
Added tests for intrinsics and encoding.

Differential Revision: http://reviews.llvm.org/D11802
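
For reference, a minimal LLVM IR sketch of exercising one of the new masked intrinsics
(the signature matches the IntrinsicsX86.td change below; the function and value names
are illustrative only):

  ; Extract the upper 128-bit lane of a 256-bit vector. The second operand is the
  ; lane index (now i32), the third the pass-through vector, the last the 8-bit mask.
  declare <4 x float> @llvm.x86.avx512.mask.vextractf32x4.256(<8 x float>, i32, <4 x float>, i8)

  define <4 x float> @extract_upper_lane(<8 x float> %src, <4 x float> %passthru, i8 %mask) {
    %res = call <4 x float> @llvm.x86.avx512.mask.vextractf32x4.256(<8 x float> %src, i32 1, <4 x float> %passthru, i8 %mask)
    ret <4 x float> %res
  }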

Modified:
    llvm/trunk/include/llvm/IR/IntrinsicsX86.td
    llvm/trunk/lib/Target/X86/X86InstrAVX512.td
    llvm/trunk/test/CodeGen/X86/avx512-cvt.ll
    llvm/trunk/test/CodeGen/X86/avx512-insert-extract.ll
    llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll
    llvm/trunk/test/CodeGen/X86/avx512dq-intrinsics.ll
    llvm/trunk/test/CodeGen/X86/avx512dqvl-intrinsics.ll
    llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll
    llvm/trunk/test/MC/X86/avx512-encodings.s
    llvm/trunk/test/MC/X86/x86-64-avx512dq.s
    llvm/trunk/test/MC/X86/x86-64-avx512dq_vl.s
    llvm/trunk/test/MC/X86/x86-64-avx512f_vl.s

Modified: llvm/trunk/include/llvm/IR/IntrinsicsX86.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IR/IntrinsicsX86.td?rev=247149&r1=247148&r2=247149&view=diff
==============================================================================
--- llvm/trunk/include/llvm/IR/IntrinsicsX86.td (original)
+++ llvm/trunk/include/llvm/IR/IntrinsicsX86.td Wed Sep  9 09:35:09 2015
@@ -2233,20 +2233,52 @@ let TargetPrefix = "x86" in {  // All in
 let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
   def int_x86_avx512_mask_vextractf32x4_512 :
       GCCBuiltin<"__builtin_ia32_extractf32x4_mask">,
-                 Intrinsic<[llvm_v4f32_ty], [llvm_v16f32_ty, llvm_i8_ty,
-                           llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
+                 Intrinsic<[llvm_v4f32_ty], [llvm_v16f32_ty, llvm_i32_ty,
+                            llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
   def int_x86_avx512_mask_vextracti32x4_512 :
       GCCBuiltin<"__builtin_ia32_extracti32x4_mask">,
-                 Intrinsic<[llvm_v4i32_ty], [llvm_v16i32_ty, llvm_i8_ty,
-                           llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+                 Intrinsic<[llvm_v4i32_ty], [llvm_v16i32_ty, llvm_i32_ty,
+                            llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vextractf32x4_256 :
+      GCCBuiltin<"__builtin_ia32_extractf32x4_256_mask">,
+                 Intrinsic<[llvm_v4f32_ty], [llvm_v8f32_ty, llvm_i32_ty,
+                            llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vextracti32x4_256 :
+      GCCBuiltin<"__builtin_ia32_extracti32x4_256_mask">,
+                 Intrinsic<[llvm_v4i32_ty], [llvm_v8i32_ty, llvm_i32_ty,
+                            llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vextractf64x2_256 :
+      GCCBuiltin<"__builtin_ia32_extractf64x2_256_mask">,
+                 Intrinsic<[llvm_v2f64_ty], [llvm_v4f64_ty, llvm_i32_ty,
+                            llvm_v2f64_ty,  llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vextracti64x2_256 :
+      GCCBuiltin<"__builtin_ia32_extracti64x2_256_mask">,
+                 Intrinsic<[llvm_v2i64_ty], [llvm_v4i64_ty, llvm_i32_ty,
+                            llvm_v2i64_ty,  llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vextractf64x2_512 :
+      GCCBuiltin<"__builtin_ia32_extractf64x2_512_mask">,
+                 Intrinsic<[llvm_v2f64_ty], [llvm_v8f64_ty, llvm_i32_ty,
+                            llvm_v2f64_ty,  llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vextracti64x2_512 :
+      GCCBuiltin<"__builtin_ia32_extracti64x2_512_mask">,
+                 Intrinsic<[llvm_v2i64_ty], [llvm_v8i64_ty, llvm_i32_ty,
+                            llvm_v2i64_ty,  llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vextractf32x8_512 :
+      GCCBuiltin<"__builtin_ia32_extractf32x8_mask">,
+                 Intrinsic<[llvm_v8f32_ty], [llvm_v16f32_ty, llvm_i32_ty,
+                            llvm_v8f32_ty,  llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_vextracti32x8_512 :
+      GCCBuiltin<"__builtin_ia32_extracti32x8_mask">,
+                 Intrinsic<[llvm_v8i32_ty],[llvm_v16i32_ty, llvm_i32_ty,
+                            llvm_v8i32_ty,  llvm_i8_ty], [IntrNoMem]>;
   def int_x86_avx512_mask_vextractf64x4_512 :
       GCCBuiltin<"__builtin_ia32_extractf64x4_mask">,
-                 Intrinsic<[llvm_v4f64_ty], [llvm_v8f64_ty, llvm_i8_ty,
-                           llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
+                 Intrinsic<[llvm_v4f64_ty], [llvm_v8f64_ty, llvm_i32_ty,
+                            llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
   def int_x86_avx512_mask_vextracti64x4_512 :
       GCCBuiltin<"__builtin_ia32_extracti64x4_mask">,
-                 Intrinsic<[llvm_v4i64_ty], [llvm_v8i64_ty, llvm_i8_ty,
-                           llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+                 Intrinsic<[llvm_v4i64_ty], [llvm_v8i64_ty, llvm_i32_ty,
+                            llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
 }
 
 // Conditional load ops

Modified: llvm/trunk/lib/Target/X86/X86InstrAVX512.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrAVX512.td?rev=247149&r1=247148&r2=247149&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrAVX512.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrAVX512.td Wed Sep  9 09:35:09 2015
@@ -566,85 +566,142 @@ def VINSERTPSzrm: AVX512AIi8<0x21, MRMSr
 // AVX-512 VECTOR EXTRACT
 //---
 
+multiclass vextract_for_size_first_position_lowering<X86VectorVTInfo From,
+                                                     X86VectorVTInfo To> {
+  // A subvector extract from the first vector position is
+  // a subregister copy that needs no instruction.
+  def NAME # To.NumElts:
+      Pat<(To.VT (extract_subvector (From.VT From.RC:$src),(iPTR 0))),
+          (To.VT (EXTRACT_SUBREG (From.VT From.RC:$src), To.SubRegIdx))>;
+}
+
 multiclass vextract_for_size<int Opcode,
-                             X86VectorVTInfo From, X86VectorVTInfo To,
-                             X86VectorVTInfo AltFrom, X86VectorVTInfo AltTo,
-                             PatFrag vextract_extract,
-                             SDNodeXForm EXTRACT_get_vextract_imm> {
+                                    X86VectorVTInfo From, X86VectorVTInfo To,
+                                    PatFrag vextract_extract> :
+  vextract_for_size_first_position_lowering<From, To> {
+
   let hasSideEffects = 0, ExeDomain = To.ExeDomain in {
+    // Use AVX512_maskable_in_asm (AVX512_maskable can't be used due to
+    // vextract_extract); we are only interested in patterns without a mask,
+    // the intrinsic pattern matches are generated below.
     defm rr : AVX512_maskable_in_asm<Opcode, MRMDestReg, To, (outs To.RC:$dst),
-                (ins VR512:$src1, u8imm:$idx),
-                "vextract" # To.EltTypeName # "x4",
+                (ins From.RC:$src1, i32u8imm:$idx),
+                "vextract" # To.EltTypeName # "x" # To.NumElts,
                 "$idx, $src1", "$src1, $idx",
-                [(set To.RC:$dst, (vextract_extract:$idx (From.VT VR512:$src1),
+                [(set To.RC:$dst, (vextract_extract:$idx (From.VT From.RC:$src1),
                                                          (iPTR imm)))]>,
-              AVX512AIi8Base, EVEX, EVEX_V512;
-    let mayStore = 1 in
-    def rm : AVX512AIi8<Opcode, MRMDestMem, (outs),
-            (ins To.MemOp:$dst, VR512:$src1, u8imm:$src2),
-            "vextract" # To.EltTypeName # "x4\t{$src2, $src1, $dst|"
-                                               "$dst, $src1, $src2}",
-            []>, EVEX, EVEX_V512, EVEX_CD8<To.EltSize, CD8VT4>;
+              AVX512AIi8Base, EVEX;
+    let mayStore = 1 in {
+      def rm  : AVX512AIi8<Opcode, MRMDestMem, (outs),
+                      (ins To.MemOp:$dst, From.RC:$src1, i32u8imm:$src2),
+                      "vextract" # To.EltTypeName # "x" # To.NumElts #
+                          "\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+                      []>, EVEX;
+
+      def rmk : AVX512AIi8<Opcode, MRMDestMem, (outs),
+                      (ins To.MemOp:$dst, To.KRCWM:$mask,
+                                          From.RC:$src1, i32u8imm:$src2),
+                       "vextract" # To.EltTypeName # "x" # To.NumElts #
+                            "\t{$src2, $src1, $dst {${mask}}|"
+                            "$dst {${mask}}, $src1, $src2}",
+                      []>, EVEX_K, EVEX;
+    }//mayStore = 1
   }
 
-  // Codegen pattern with the alternative types, e.g. v8i64 -> v2i64 for
-  // vextracti32x4
-  def : Pat<(vextract_extract:$ext (AltFrom.VT VR512:$src1), (iPTR imm)),
-            (AltTo.VT (!cast<Instruction>(NAME # To.EltSize # "x4rr")
-                          VR512:$src1,
-                          (EXTRACT_get_vextract_imm To.RC:$ext)))>;
-
-  // A 128/256-bit subvector extract from the first 512-bit vector position is
-  // a subregister copy that needs no instruction.
-  def : Pat<(To.VT (extract_subvector (From.VT VR512:$src), (iPTR 0))),
-            (To.VT
-               (EXTRACT_SUBREG (From.VT VR512:$src), To.SubRegIdx))>;
-
-  // And for the alternative types.
-  def : Pat<(AltTo.VT (extract_subvector (AltFrom.VT VR512:$src), (iPTR 0))),
-            (AltTo.VT
-               (EXTRACT_SUBREG (AltFrom.VT VR512:$src), AltTo.SubRegIdx))>;
-
   // Intrinsic call with masking.
   def : Pat<(!cast<Intrinsic>("int_x86_avx512_mask_vextract" # To.EltTypeName #
-                              "x4_512")
-                VR512:$src1, (iPTR imm:$idx), To.RC:$src0, GR8:$mask),
-            (!cast<Instruction>(NAME # To.EltSize # "x4rrk") To.RC:$src0,
-                (v4i1 (COPY_TO_REGCLASS GR8:$mask, VK4WM)),
-                VR512:$src1, imm:$idx)>;
+                              "x" # To.NumElts # "_" # From.Size)
+                From.RC:$src1, (iPTR imm:$idx), To.RC:$src0, To.MRC:$mask),
+            (!cast<Instruction>(NAME # To.EltSize # "x" # To.NumElts #
+                                From.ZSuffix # "rrk")
+                To.RC:$src0,
+                (COPY_TO_REGCLASS To.MRC:$mask, To.KRCWM),
+                From.RC:$src1, imm:$idx)>;
 
   // Intrinsic call with zero-masking.
   def : Pat<(!cast<Intrinsic>("int_x86_avx512_mask_vextract" # To.EltTypeName #
-                              "x4_512")
-                VR512:$src1, (iPTR imm:$idx), To.ImmAllZerosV, GR8:$mask),
-            (!cast<Instruction>(NAME # To.EltSize # "x4rrkz")
-                (v4i1 (COPY_TO_REGCLASS GR8:$mask, VK4WM)),
-                VR512:$src1, imm:$idx)>;
+                              "x" # To.NumElts # "_" # From.Size)
+                From.RC:$src1, (iPTR imm:$idx), To.ImmAllZerosV, To.MRC:$mask),
+            (!cast<Instruction>(NAME # To.EltSize # "x" # To.NumElts #
+                                From.ZSuffix # "rrkz")
+                (COPY_TO_REGCLASS To.MRC:$mask, To.KRCWM),
+                From.RC:$src1, imm:$idx)>;
 
   // Intrinsic call without masking.
   def : Pat<(!cast<Intrinsic>("int_x86_avx512_mask_vextract" # To.EltTypeName #
-                              "x4_512")
-                VR512:$src1, (iPTR imm:$idx), To.ImmAllZerosV, (i8 -1)),
-            (!cast<Instruction>(NAME # To.EltSize # "x4rr")
-                VR512:$src1, imm:$idx)>;
+                              "x" # To.NumElts # "_" # From.Size)
+                From.RC:$src1, (iPTR imm:$idx), To.ImmAllZerosV, (i8 -1)),
+            (!cast<Instruction>(NAME # To.EltSize # "x" # To.NumElts #
+                                From.ZSuffix # "rr")
+                From.RC:$src1, imm:$idx)>;
 }
 
-multiclass vextract_for_type<ValueType EltVT32, int Opcode32,
-                             ValueType EltVT64, int Opcode64> {
-  defm NAME # "32x4" : vextract_for_size<Opcode32,
+// This multiclass generates patterns for matching vextract with common types
+// (X86VectorVTInfo From, X86VectorVTInfo To) and alternative types
+// (X86VectorVTInfo AltFrom, X86VectorVTInfo AltTo).
+multiclass vextract_for_size_all<int Opcode,
+                             X86VectorVTInfo From, X86VectorVTInfo To,
+                             X86VectorVTInfo AltFrom, X86VectorVTInfo AltTo,
+                             PatFrag vextract_extract,
+                             SDNodeXForm EXTRACT_get_vextract_imm> :
+  vextract_for_size<Opcode, From, To, vextract_extract>,
+  vextract_for_size_first_position_lowering<AltFrom, AltTo> {
+
+  // Codegen pattern with the alternative types.
+  // Only add this if the operation is not supported natively via AVX512DQ.
+  let Predicates = [NoDQI] in
+    def : Pat<(vextract_extract:$ext (AltFrom.VT AltFrom.RC:$src1), (iPTR imm)),
+              (AltTo.VT (!cast<Instruction>(NAME # To.EltSize # "x" #
+                                            To.NumElts # From.ZSuffix # "rr")
+                         AltFrom.RC:$src1,
+                         (EXTRACT_get_vextract_imm To.RC:$ext)))>;
+}
+
+multiclass vextract_for_type<ValueType EltVT32, int Opcode128,
+                             ValueType EltVT64, int Opcode256> {
+  defm NAME # "32x4Z" : vextract_for_size_all<Opcode128,
                                  X86VectorVTInfo<16, EltVT32, VR512>,
                                  X86VectorVTInfo< 4, EltVT32, VR128X>,
                                  X86VectorVTInfo< 8, EltVT64, VR512>,
                                  X86VectorVTInfo< 2, EltVT64, VR128X>,
                                  vextract128_extract,
-                                 EXTRACT_get_vextract128_imm>;
-  defm NAME # "64x4" : vextract_for_size<Opcode64,
+                                 EXTRACT_get_vextract128_imm>,
+                                     EVEX_V512, EVEX_CD8<32, CD8VT4>;
+  defm NAME # "64x4Z" : vextract_for_size_all<Opcode256,
                                  X86VectorVTInfo< 8, EltVT64, VR512>,
                                  X86VectorVTInfo< 4, EltVT64, VR256X>,
                                  X86VectorVTInfo<16, EltVT32, VR512>,
                                  X86VectorVTInfo< 8, EltVT32, VR256>,
                                  vextract256_extract,
-                                 EXTRACT_get_vextract256_imm>, VEX_W;
+                                 EXTRACT_get_vextract256_imm>,
+                                     VEX_W, EVEX_V512, EVEX_CD8<64, CD8VT4>;
+  let Predicates = [HasVLX] in
+    defm NAME # "32x4Z256" : vextract_for_size_all<Opcode128,
+                                 X86VectorVTInfo< 8, EltVT32, VR256X>,
+                                 X86VectorVTInfo< 4, EltVT32, VR128X>,
+                                 X86VectorVTInfo< 4, EltVT64, VR256X>,
+                                 X86VectorVTInfo< 2, EltVT64, VR128X>,
+                                 vextract128_extract,
+                                 EXTRACT_get_vextract128_imm>,
+                                     EVEX_V256, EVEX_CD8<32, CD8VT4>;
+  let Predicates = [HasVLX, HasDQI] in
+    defm NAME # "64x2Z256" : vextract_for_size<Opcode128,
+                                 X86VectorVTInfo< 4, EltVT64, VR256X>,
+                                 X86VectorVTInfo< 2, EltVT64, VR128X>,
+                                 vextract128_extract>,
+                                     VEX_W, EVEX_V256, EVEX_CD8<64, CD8VT2>;
+  let Predicates = [HasDQI] in {
+    defm NAME # "64x2Z" : vextract_for_size<Opcode128,
+                                 X86VectorVTInfo< 8, EltVT64, VR512>,
+                                 X86VectorVTInfo< 2, EltVT64, VR128X>,
+                                 vextract128_extract>,
+                                     VEX_W, EVEX_V512, EVEX_CD8<64, CD8VT2>;
+    defm NAME # "32x8Z" : vextract_for_size<Opcode256,
+                                 X86VectorVTInfo<16, EltVT32, VR512>,
+                                 X86VectorVTInfo< 8, EltVT32, VR256X>,
+                                 vextract256_extract>,
+                                     EVEX_V512, EVEX_CD8<32, CD8VT8>;
+  }
 }
 
 defm VEXTRACTF : vextract_for_type<f32, 0x19, f64, 0x1b>;

Modified: llvm/trunk/test/CodeGen/X86/avx512-cvt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-cvt.ll?rev=247149&r1=247148&r2=247149&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-cvt.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-cvt.ll Wed Sep  9 09:35:09 2015
@@ -52,14 +52,14 @@ define <4 x i64> @f32tosl(<4 x float> %a
 }
 
 ; CHECK-LABEL: sltof432
-; CHECK: vcvtqq2ps 
+; CHECK: vcvtqq2ps
 define <4 x float> @sltof432(<4 x i64> %a) {
   %b = sitofp <4 x i64> %a to <4 x float>
   ret <4 x float> %b
 }
 
 ; CHECK-LABEL: ultof432
-; CHECK: vcvtuqq2ps 
+; CHECK: vcvtuqq2ps
 define <4 x float> @ultof432(<4 x i64> %a) {
   %b = uitofp <4 x i64> %a to <4 x float>
   ret <4 x float> %b
@@ -279,12 +279,14 @@ define i32 @float_to_int(float %x) {
    ret i32 %res
 }
 
-; CHECK-LABEL: uitof64
-; CHECK: vcvtudq2pd
-; CHECK: vextracti64x4
-; CHECK: vcvtudq2pd
-; CHECK: ret
 define <16 x double> @uitof64(<16 x i32> %a) nounwind {
+; CHECK-LABEL: uitof64:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vcvtudq2pd %ymm0, %zmm2
+; CHECK-NEXT:    vextracti32x8 $1, %zmm0, %ymm0
+; CHECK-NEXT:    vcvtudq2pd %ymm0, %zmm1
+; CHECK-NEXT:    vmovaps %zmm2, %zmm0
+; CHECK-NEXT:    retq 
   %b = uitofp <16 x i32> %a to <16 x double>
   ret <16 x double> %b
 }
@@ -407,7 +409,7 @@ define <8 x double> @sitofp_8i1_double(<
 }
 
 ; CHECK-LABEL: @uitofp_16i8
-; CHECK:  vpmovzxbd  
+; CHECK:  vpmovzxbd
 ; CHECK: vcvtudq2ps
 define <16 x float> @uitofp_16i8(<16 x i8>%a) {
   %b = uitofp <16 x i8> %a to <16 x float>

Modified: llvm/trunk/test/CodeGen/X86/avx512-insert-extract.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-insert-extract.ll?rev=247149&r1=247148&r2=247149&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-insert-extract.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-insert-extract.ll Wed Sep  9 09:35:09 2015
@@ -12,14 +12,24 @@ define <16 x float> @test1(<16 x float>
   ret <16 x float> %rrr3
 }
 
-;CHECK-LABEL: test2:
-;KNL: vinsertf32x4 $0
-;SKX: vinsertf64x2 $0
-;CHECK: vextractf32x4 $3
-;KNL: vinsertf32x4 $3
-;SKX: vinsertf64x2 $3
-;CHECK: ret
 define <8 x double> @test2(<8 x double> %x, double* %br, double %y) nounwind {
+; KNL-LABEL: test2:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vmovhpd (%rdi), %xmm0, %xmm2
+; KNL-NEXT:    vinsertf32x4 $0, %xmm2, %zmm0, %zmm0
+; KNL-NEXT:    vextractf32x4 $3, %zmm0, %xmm2
+; KNL-NEXT:    vmovsd %xmm1, %xmm2, %xmm1
+; KNL-NEXT:    vinsertf32x4 $3, %xmm1, %zmm0, %zmm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: test2:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vmovhpd (%rdi), %xmm0, %xmm2
+; SKX-NEXT:    vinsertf64x2 $0, %xmm2, %zmm0, %zmm0
+; SKX-NEXT:    vextractf64x2 $3, %zmm0, %xmm2
+; SKX-NEXT:    vmovsd %xmm1, %xmm2, %xmm1
+; SKX-NEXT:    vinsertf64x2 $3, %xmm1, %zmm0, %zmm0
+; SKX-NEXT:    retq
   %rrr = load double, double* %br
   %rrr2 = insertelement <8 x double> %x, double %rrr, i32 1
   %rrr3 = insertelement <8 x double> %rrr2, double %y, i32 6
@@ -36,12 +46,22 @@ define <16 x float> @test3(<16 x float>
   ret <16 x float> %rrr2
 }
 
-;CHECK-LABEL: test4:
-;CHECK: vextracti32x4 $2
-;KNL: vinserti32x4 $0
-;SKX: vinserti64x2 $0
-;CHECK: ret
 define <8 x i64> @test4(<8 x i64> %x) nounwind {
+; KNL-LABEL: test4:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vextracti32x4 $2, %zmm0, %xmm1
+; KNL-NEXT:    vmovq %xmm1, %rax
+; KNL-NEXT:    vpinsrq $1, %rax, %xmm0, %xmm1
+; KNL-NEXT:    vinserti32x4 $0, %xmm1, %zmm0, %zmm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: test4:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vextracti64x2 $2, %zmm0, %xmm1
+; SKX-NEXT:    vmovq %xmm1, %rax
+; SKX-NEXT:    vpinsrq $1, %rax, %xmm0, %xmm1
+; SKX-NEXT:    vinserti64x2 $0, %xmm1, %zmm0, %zmm0
+; SKX-NEXT:    retq
   %eee = extractelement <8 x i64> %x, i32 4
   %rrr2 = insertelement <8 x i64> %x, i64 %eee, i32 1
   ret <8 x i64> %rrr2
@@ -142,7 +162,7 @@ define i64 @test12(<16 x i64>%a, <16 x i
 ;CHECK: andl    $1, %eax
 ;CHECK: kmovw   %eax, %k0
 ;CHECK: movw    $-4
-;CHECK: korw    
+;CHECK: korw
 define i16 @test13(i32 %a, i32 %b) {
   %cmp_res = icmp ult i32 %a, %b
   %maskv = insertelement <16 x i1> <i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, i1 %cmp_res, i32 0

Modified: llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll?rev=247149&r1=247148&r2=247149&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll Wed Sep  9 09:35:09 2015
@@ -911,38 +911,38 @@ declare i8 @llvm.x86.avx512.mask.ucmp.q.
 define <4 x float> @test_mask_vextractf32x4(<4 x float> %b, <16 x float> %a, i8 %mask) {
 ; CHECK-LABEL: test_mask_vextractf32x4:
 ; CHECK: vextractf32x4 $2, %zmm1, %xmm0 {%k1}
-  %res = call <4 x float> @llvm.x86.avx512.mask.vextractf32x4.512(<16 x float> %a, i8 2, <4 x float> %b, i8 %mask)
+  %res = call <4 x float> @llvm.x86.avx512.mask.vextractf32x4.512(<16 x float> %a, i32 2, <4 x float> %b, i8 %mask)
   ret <4 x float> %res
 }
 
-declare <4 x float> @llvm.x86.avx512.mask.vextractf32x4.512(<16 x float>, i8, <4 x float>, i8)
+declare <4 x float> @llvm.x86.avx512.mask.vextractf32x4.512(<16 x float>, i32, <4 x float>, i8)
 
 define <4 x i64> @test_mask_vextracti64x4(<4 x i64> %b, <8 x i64> %a, i8 %mask) {
 ; CHECK-LABEL: test_mask_vextracti64x4:
 ; CHECK: vextracti64x4 $2, %zmm1, %ymm0 {%k1}
-  %res = call <4 x i64> @llvm.x86.avx512.mask.vextracti64x4.512(<8 x i64> %a, i8 2, <4 x i64> %b, i8 %mask)
+  %res = call <4 x i64> @llvm.x86.avx512.mask.vextracti64x4.512(<8 x i64> %a, i32 2, <4 x i64> %b, i8 %mask)
   ret <4 x i64> %res
 }
 
-declare <4 x i64> @llvm.x86.avx512.mask.vextracti64x4.512(<8 x i64>, i8, <4 x i64>, i8)
+declare <4 x i64> @llvm.x86.avx512.mask.vextracti64x4.512(<8 x i64>, i32, <4 x i64>, i8)
 
 define <4 x i32> @test_maskz_vextracti32x4(<16 x i32> %a, i8 %mask) {
 ; CHECK-LABEL: test_maskz_vextracti32x4:
 ; CHECK: vextracti32x4 $2, %zmm0, %xmm0 {%k1} {z}
-  %res = call <4 x i32> @llvm.x86.avx512.mask.vextracti32x4.512(<16 x i32> %a, i8 2, <4 x i32> zeroinitializer, i8 %mask)
+  %res = call <4 x i32> @llvm.x86.avx512.mask.vextracti32x4.512(<16 x i32> %a, i32 2, <4 x i32> zeroinitializer, i8 %mask)
   ret <4 x i32> %res
 }
 
-declare <4 x i32> @llvm.x86.avx512.mask.vextracti32x4.512(<16 x i32>, i8, <4 x i32>, i8)
+declare <4 x i32> @llvm.x86.avx512.mask.vextracti32x4.512(<16 x i32>, i32, <4 x i32>, i8)
 
 define <4 x double> @test_vextractf64x4(<8 x double> %a) {
 ; CHECK-LABEL: test_vextractf64x4:
 ; CHECK: vextractf64x4 $2, %zmm0, %ymm0 ##
-  %res = call <4 x double> @llvm.x86.avx512.mask.vextractf64x4.512(<8 x double> %a, i8 2, <4 x double> zeroinitializer, i8 -1)
+  %res = call <4 x double> @llvm.x86.avx512.mask.vextractf64x4.512(<8 x double> %a, i32 2, <4 x double> zeroinitializer, i8 -1)
   ret <4 x double> %res
 }
 
-declare <4 x double> @llvm.x86.avx512.mask.vextractf64x4.512(<8 x double>, i8, <4 x double>, i8)
+declare <4 x double> @llvm.x86.avx512.mask.vextractf64x4.512(<8 x double>, i32, <4 x double>, i8)
 
 define <16 x i32> @test_x86_avx512_pslli_d(<16 x i32> %a0) {
   ; CHECK-LABEL: test_x86_avx512_pslli_d

Modified: llvm/trunk/test/CodeGen/X86/avx512dq-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512dq-intrinsics.ll?rev=247149&r1=247148&r2=247149&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512dq-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512dq-intrinsics.ll Wed Sep  9 09:35:09 2015
@@ -315,3 +315,44 @@ define <2 x double>@test_int_x86_avx512_
   %res2 = fadd <2 x double> %res, %res1
   ret <2 x double> %res2
 }
+
+
+declare <2 x double> @llvm.x86.avx512.mask.vextractf64x2.512(<8 x double>, i32, <2 x double>, i8)
+
+define <2 x double>@test_int_x86_avx512_mask_vextractf64x2_512(<8 x double> %x0, <2 x double> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vextractf64x2_512:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vextractf64x2 $1, %zmm0, %xmm1 {%k1}
+; CHECK-NEXT:    vextractf64x2 $1, %zmm0, %xmm2 {%k1} {z}
+; CHECK-NEXT:    vextractf64x2 $1, %zmm0, %xmm0
+; CHECK-NEXT:    vaddpd %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    vaddpd %xmm0, %xmm2, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <2 x double> @llvm.x86.avx512.mask.vextractf64x2.512(<8 x double> %x0,i32 1, <2 x double> %x2, i8 %x3)
+  %res2 = call <2 x double> @llvm.x86.avx512.mask.vextractf64x2.512(<8 x double> %x0,i32 1, <2 x double> zeroinitializer, i8 %x3)
+  %res1 = call <2 x double> @llvm.x86.avx512.mask.vextractf64x2.512(<8 x double> %x0,i32 1, <2 x double> zeroinitializer, i8 -1)
+  %res3 = fadd <2 x double> %res, %res1
+  %res4 = fadd <2 x double> %res2, %res3
+  ret <2 x double> %res4
+}
+
+declare <8 x float> @llvm.x86.avx512.mask.vextractf32x8.512(<16 x float>, i32, <8 x float>, i8)
+
+define <8 x float>@test_int_x86_avx512_mask_vextractf32x8(<16 x float> %x0, <8 x float> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vextractf32x8:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vextractf32x8 $1, %zmm0, %ymm1 {%k1}
+; CHECK-NEXT:    vextractf32x8 $1, %zmm0, %ymm2 {%k1} {z}
+; CHECK-NEXT:    vextractf32x8 $1, %zmm0, %ymm0
+; CHECK-NEXT:    vaddps %ymm0, %ymm1, %ymm0
+; CHECK-NEXT:    vaddps %ymm0, %ymm2, %ymm0
+; CHECK-NEXT:    retq
+  %res  = call <8 x float> @llvm.x86.avx512.mask.vextractf32x8.512(<16 x float> %x0,i32 1, <8 x float> %x2, i8 %x3)
+  %res2 = call <8 x float> @llvm.x86.avx512.mask.vextractf32x8.512(<16 x float> %x0,i32 1, <8 x float> zeroinitializer, i8 %x3)
+  %res1 = call <8 x float> @llvm.x86.avx512.mask.vextractf32x8.512(<16 x float> %x0,i32 1, <8 x float> zeroinitializer, i8 -1)
+  %res3 = fadd <8 x float> %res, %res1
+  %res4 = fadd <8 x float> %res2, %res3
+  ret <8 x float> %res4
+}

Modified: llvm/trunk/test/CodeGen/X86/avx512dqvl-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512dqvl-intrinsics.ll?rev=247149&r1=247148&r2=247149&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512dqvl-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512dqvl-intrinsics.ll Wed Sep  9 09:35:09 2015
@@ -1648,3 +1648,23 @@ define <8 x float>@test_int_x86_avx512_m
   %res2 = fadd <8 x float> %res, %res1
   ret <8 x float> %res2
 }
+
+declare <2 x double> @llvm.x86.avx512.mask.vextractf64x2.256(<4 x double>, i32, <2 x double>, i8)
+
+define <2 x double>@test_int_x86_avx512_mask_vextractf64x2_256(<4 x double> %x0, <2 x double> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vextractf64x2_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vextractf64x2 $1, %ymm0, %xmm1 {%k1}
+; CHECK-NEXT:    vextractf64x2 $1, %ymm0, %xmm2 {%k1} {z}
+; CHECK-NEXT:    vextractf64x2 $1, %ymm0, %xmm0
+; CHECK-NEXT:    vaddpd %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <2 x double> @llvm.x86.avx512.mask.vextractf64x2.256(<4 x double> %x0,i32 1, <2 x double> %x2, i8 %x3)
+  %res2 = call <2 x double> @llvm.x86.avx512.mask.vextractf64x2.256(<4 x double> %x0,i32 1, <2 x double> zeroinitializer, i8 %x3)
+  %res1 = call <2 x double> @llvm.x86.avx512.mask.vextractf64x2.256(<4 x double> %x0,i32 1, <2 x double> zeroinitializer, i8 -1)
+  %res3 = fadd <2 x double> %res, %res1
+  %res4 = fadd <2 x double> %res3, %res2
+  ret <2 x double> %res4
+}

Modified: llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll?rev=247149&r1=247148&r2=247149&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll Wed Sep  9 09:35:09 2015
@@ -4508,6 +4508,26 @@ define <8 x float>@test_int_x86_avx512_m
   ret <8 x float> %res2
 }
 
+declare <4 x float> @llvm.x86.avx512.mask.vextractf32x4.256(<8 x float>, i32, <4 x float>, i8)
+
+define <4 x float>@test_int_x86_avx512_mask_vextractf32x4_256(<8 x float> %x0, <4 x float> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_vextractf32x4_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vextractf32x4 $1, %ymm0, %xmm1 {%k1}
+; CHECK-NEXT:    vextractf32x4 $1, %ymm0, %xmm2 {%k1} {z}
+; CHECK-NEXT:    vextractf32x4 $1, %ymm0, %xmm0
+; CHECK-NEXT:    vaddps %xmm2, %xmm1, %xmm1
+; CHECK-NEXT:    vaddps %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    retq
+  %res  = call <4 x float> @llvm.x86.avx512.mask.vextractf32x4.256(<8 x float> %x0, i32 1, <4 x float> %x2, i8 %x3)
+  %res1 = call <4 x float> @llvm.x86.avx512.mask.vextractf32x4.256(<8 x float> %x0, i32 1, <4 x float> zeroinitializer, i8 %x3)
+  %res2 = call <4 x float> @llvm.x86.avx512.mask.vextractf32x4.256(<8 x float> %x0, i32 1, <4 x float> zeroinitializer, i8 -1)
+  %res3 = fadd <4 x float> %res, %res1
+  %res4 = fadd <4 x float> %res2, %res3
+  ret <4 x float> %res4
+}
+
 declare <2 x double> @llvm.x86.avx512.mask.getmant.pd.128(<2 x double>, i32, <2 x double>, i8)
 
 define <2 x double>@test_int_x86_avx512_mask_getmant_pd_128(<2 x double> %x0, <2 x double> %x2, i8 %x3) {

Modified: llvm/trunk/test/MC/X86/avx512-encodings.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/X86/avx512-encodings.s?rev=247149&r1=247148&r2=247149&view=diff
==============================================================================
--- llvm/trunk/test/MC/X86/avx512-encodings.s (original)
+++ llvm/trunk/test/MC/X86/avx512-encodings.s Wed Sep  9 09:35:09 2015
@@ -14958,6 +14958,198 @@ vpermilpd $0x23, 0x400(%rbx), %zmm2
 // CHECK:  encoding: [0x62,0xf2,0xc5,0x08,0x43,0x92,0xf8,0xfb,0xff,0xff]
           vgetexpsd -1032(%rdx), %xmm7, %xmm2
 
+// CHECK: vextractf32x4 $171, %zmm21, %xmm15
+// CHECK:  encoding: [0x62,0xc3,0x7d,0x48,0x19,0xef,0xab]
+          vextractf32x4 $0xab, %zmm21, %xmm15
+
+// CHECK: vextractf32x4 $171, %zmm21, %xmm15 {%k1}
+// CHECK:  encoding: [0x62,0xc3,0x7d,0x49,0x19,0xef,0xab]
+          vextractf32x4 $0xab, %zmm21, %xmm15 {%k1}
+
+// CHECK: vextractf32x4 $171, %zmm21, %xmm15 {%k1} {z}
+// CHECK:  encoding: [0x62,0xc3,0x7d,0xc9,0x19,0xef,0xab]
+          vextractf32x4 $0xab, %zmm21, %xmm15 {%k1} {z}
+
+// CHECK: vextractf32x4 $123, %zmm21, %xmm15
+// CHECK:  encoding: [0x62,0xc3,0x7d,0x48,0x19,0xef,0x7b]
+          vextractf32x4 $0x7b, %zmm21, %xmm15
+
+// CHECK: vextractf32x4 $171, %zmm20, (%rcx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x48,0x19,0x21,0xab]
+          vextractf32x4 $0xab, %zmm20, (%rcx)
+
+// CHECK: vextractf32x4 $171, %zmm20, (%rcx) {%k7}
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x4f,0x19,0x21,0xab]
+          vextractf32x4 $0xab, %zmm20, (%rcx) {%k7}
+
+// CHECK: vextractf32x4 $123, %zmm20, (%rcx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x48,0x19,0x21,0x7b]
+          vextractf32x4 $0x7b, %zmm20, (%rcx)
+
+// CHECK: vextractf32x4 $123, %zmm20, 291(%rax,%r14,8)
+// CHECK:  encoding: [0x62,0xa3,0x7d,0x48,0x19,0xa4,0xf0,0x23,0x01,0x00,0x00,0x7b]
+          vextractf32x4 $0x7b, %zmm20, 291(%rax,%r14,8)
+
+// CHECK: vextractf32x4 $123, %zmm20, 2032(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x48,0x19,0x62,0x7f,0x7b]
+          vextractf32x4 $0x7b, %zmm20, 2032(%rdx)
+
+// CHECK: vextractf32x4 $123, %zmm20, 2048(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x48,0x19,0xa2,0x00,0x08,0x00,0x00,0x7b]
+          vextractf32x4 $0x7b, %zmm20, 2048(%rdx)
+
+// CHECK: vextractf32x4 $123, %zmm20, -2048(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x48,0x19,0x62,0x80,0x7b]
+          vextractf32x4 $0x7b, %zmm20, -2048(%rdx)
+
+// CHECK: vextractf32x4 $123, %zmm20, -2064(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x48,0x19,0xa2,0xf0,0xf7,0xff,0xff,0x7b]
+          vextractf32x4 $0x7b, %zmm20, -2064(%rdx)
+
+// CHECK: vextractf64x4 $171, %zmm24, %ymm11
+// CHECK:  encoding: [0x62,0x43,0xfd,0x48,0x1b,0xc3,0xab]
+          vextractf64x4 $0xab, %zmm24, %ymm11
+
+// CHECK: vextractf64x4 $171, %zmm24, %ymm11 {%k5}
+// CHECK:  encoding: [0x62,0x43,0xfd,0x4d,0x1b,0xc3,0xab]
+          vextractf64x4 $0xab, %zmm24, %ymm11 {%k5}
+
+// CHECK: vextractf64x4 $171, %zmm24, %ymm11 {%k5} {z}
+// CHECK:  encoding: [0x62,0x43,0xfd,0xcd,0x1b,0xc3,0xab]
+          vextractf64x4 $0xab, %zmm24, %ymm11 {%k5} {z}
+
+// CHECK: vextractf64x4 $123, %zmm24, %ymm11
+// CHECK:  encoding: [0x62,0x43,0xfd,0x48,0x1b,0xc3,0x7b]
+          vextractf64x4 $0x7b, %zmm24, %ymm11
+
+// CHECK: vextractf64x4 $171, %zmm5, (%rcx)
+// CHECK:  encoding: [0x62,0xf3,0xfd,0x48,0x1b,0x29,0xab]
+          vextractf64x4 $0xab, %zmm5, (%rcx)
+
+// CHECK: vextractf64x4 $171, %zmm5, (%rcx) {%k4}
+// CHECK:  encoding: [0x62,0xf3,0xfd,0x4c,0x1b,0x29,0xab]
+          vextractf64x4 $0xab, %zmm5, (%rcx) {%k4}
+
+// CHECK: vextractf64x4 $123, %zmm5, (%rcx)
+// CHECK:  encoding: [0x62,0xf3,0xfd,0x48,0x1b,0x29,0x7b]
+          vextractf64x4 $0x7b, %zmm5, (%rcx)
+
+// CHECK: vextractf64x4 $123, %zmm5, 291(%rax,%r14,8)
+// CHECK:  encoding: [0x62,0xb3,0xfd,0x48,0x1b,0xac,0xf0,0x23,0x01,0x00,0x00,0x7b]
+          vextractf64x4 $0x7b, %zmm5, 291(%rax,%r14,8)
+
+// CHECK: vextractf64x4 $123, %zmm5, 4064(%rdx)
+// CHECK:  encoding: [0x62,0xf3,0xfd,0x48,0x1b,0x6a,0x7f,0x7b]
+          vextractf64x4 $0x7b, %zmm5, 4064(%rdx)
+
+// CHECK: vextractf64x4 $123, %zmm5, 4096(%rdx)
+// CHECK:  encoding: [0x62,0xf3,0xfd,0x48,0x1b,0xaa,0x00,0x10,0x00,0x00,0x7b]
+          vextractf64x4 $0x7b, %zmm5, 4096(%rdx)
+
+// CHECK: vextractf64x4 $123, %zmm5, -4096(%rdx)
+// CHECK:  encoding: [0x62,0xf3,0xfd,0x48,0x1b,0x6a,0x80,0x7b]
+          vextractf64x4 $0x7b, %zmm5, -4096(%rdx)
+
+// CHECK: vextractf64x4 $123, %zmm5, -4128(%rdx)
+// CHECK:  encoding: [0x62,0xf3,0xfd,0x48,0x1b,0xaa,0xe0,0xef,0xff,0xff,0x7b]
+          vextractf64x4 $0x7b, %zmm5, -4128(%rdx)
+
+// CHECK: vextracti32x4 $171, %zmm16, %xmm13
+// CHECK:  encoding: [0x62,0xc3,0x7d,0x48,0x39,0xc5,0xab]
+          vextracti32x4 $0xab, %zmm16, %xmm13
+
+// CHECK: vextracti32x4 $171, %zmm16, %xmm13 {%k5}
+// CHECK:  encoding: [0x62,0xc3,0x7d,0x4d,0x39,0xc5,0xab]
+          vextracti32x4 $0xab, %zmm16, %xmm13 {%k5}
+
+// CHECK: vextracti32x4 $171, %zmm16, %xmm13 {%k5} {z}
+// CHECK:  encoding: [0x62,0xc3,0x7d,0xcd,0x39,0xc5,0xab]
+          vextracti32x4 $0xab, %zmm16, %xmm13 {%k5} {z}
+
+// CHECK: vextracti32x4 $123, %zmm16, %xmm13
+// CHECK:  encoding: [0x62,0xc3,0x7d,0x48,0x39,0xc5,0x7b]
+          vextracti32x4 $0x7b, %zmm16, %xmm13
+
+// CHECK: vextracti32x4 $171, %zmm29, (%rcx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x48,0x39,0x29,0xab]
+          vextracti32x4 $0xab, %zmm29, (%rcx)
+
+// CHECK: vextracti32x4 $171, %zmm29, (%rcx) {%k2}
+// CHECK:  encoding: [0x62,0x63,0x7d,0x4a,0x39,0x29,0xab]
+          vextracti32x4 $0xab, %zmm29, (%rcx) {%k2}
+
+// CHECK: vextracti32x4 $123, %zmm29, (%rcx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x48,0x39,0x29,0x7b]
+          vextracti32x4 $0x7b, %zmm29, (%rcx)
+
+// CHECK: vextracti32x4 $123, %zmm29, 291(%rax,%r14,8)
+// CHECK:  encoding: [0x62,0x23,0x7d,0x48,0x39,0xac,0xf0,0x23,0x01,0x00,0x00,0x7b]
+          vextracti32x4 $0x7b, %zmm29, 291(%rax,%r14,8)
+
+// CHECK: vextracti32x4 $123, %zmm29, 2032(%rdx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x48,0x39,0x6a,0x7f,0x7b]
+          vextracti32x4 $0x7b, %zmm29, 2032(%rdx)
+
+// CHECK: vextracti32x4 $123, %zmm29, 2048(%rdx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x48,0x39,0xaa,0x00,0x08,0x00,0x00,0x7b]
+          vextracti32x4 $0x7b, %zmm29, 2048(%rdx)
+
+// CHECK: vextracti32x4 $123, %zmm29, -2048(%rdx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x48,0x39,0x6a,0x80,0x7b]
+          vextracti32x4 $0x7b, %zmm29, -2048(%rdx)
+
+// CHECK: vextracti32x4 $123, %zmm29, -2064(%rdx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x48,0x39,0xaa,0xf0,0xf7,0xff,0xff,0x7b]
+          vextracti32x4 $0x7b, %zmm29, -2064(%rdx)
+
+// CHECK: vextracti64x4 $171, %zmm16, %ymm13
+// CHECK:  encoding: [0x62,0xc3,0xfd,0x48,0x3b,0xc5,0xab]
+          vextracti64x4 $0xab, %zmm16, %ymm13
+
+// CHECK: vextracti64x4 $171, %zmm16, %ymm13 {%k3}
+// CHECK:  encoding: [0x62,0xc3,0xfd,0x4b,0x3b,0xc5,0xab]
+          vextracti64x4 $0xab, %zmm16, %ymm13 {%k3}
+
+// CHECK: vextracti64x4 $171, %zmm16, %ymm13 {%k3} {z}
+// CHECK:  encoding: [0x62,0xc3,0xfd,0xcb,0x3b,0xc5,0xab]
+          vextracti64x4 $0xab, %zmm16, %ymm13 {%k3} {z}
+
+// CHECK: vextracti64x4 $123, %zmm16, %ymm13
+// CHECK:  encoding: [0x62,0xc3,0xfd,0x48,0x3b,0xc5,0x7b]
+          vextracti64x4 $0x7b, %zmm16, %ymm13
+
+// CHECK: vextracti64x4 $171, %zmm30, (%rcx)
+// CHECK:  encoding: [0x62,0x63,0xfd,0x48,0x3b,0x31,0xab]
+          vextracti64x4 $0xab, %zmm30, (%rcx)
+
+// CHECK: vextracti64x4 $171, %zmm30, (%rcx) {%k4}
+// CHECK:  encoding: [0x62,0x63,0xfd,0x4c,0x3b,0x31,0xab]
+          vextracti64x4 $0xab, %zmm30, (%rcx) {%k4}
+
+// CHECK: vextracti64x4 $123, %zmm30, (%rcx)
+// CHECK:  encoding: [0x62,0x63,0xfd,0x48,0x3b,0x31,0x7b]
+          vextracti64x4 $0x7b, %zmm30, (%rcx)
+
+// CHECK: vextracti64x4 $123, %zmm30, 291(%rax,%r14,8)
+// CHECK:  encoding: [0x62,0x23,0xfd,0x48,0x3b,0xb4,0xf0,0x23,0x01,0x00,0x00,0x7b]
+          vextracti64x4 $0x7b, %zmm30, 291(%rax,%r14,8)
+
+// CHECK: vextracti64x4 $123, %zmm30, 4064(%rdx)
+// CHECK:  encoding: [0x62,0x63,0xfd,0x48,0x3b,0x72,0x7f,0x7b]
+          vextracti64x4 $0x7b, %zmm30, 4064(%rdx)
+
+// CHECK: vextracti64x4 $123, %zmm30, 4096(%rdx)
+// CHECK:  encoding: [0x62,0x63,0xfd,0x48,0x3b,0xb2,0x00,0x10,0x00,0x00,0x7b]
+          vextracti64x4 $0x7b, %zmm30, 4096(%rdx)
+
+// CHECK: vextracti64x4 $123, %zmm30, -4096(%rdx)
+// CHECK:  encoding: [0x62,0x63,0xfd,0x48,0x3b,0x72,0x80,0x7b]
+          vextracti64x4 $0x7b, %zmm30, -4096(%rdx)
+
+// CHECK: vextracti64x4 $123, %zmm30, -4128(%rdx)
+// CHECK:  encoding: [0x62,0x63,0xfd,0x48,0x3b,0xb2,0xe0,0xef,0xff,0xff,0x7b]
+          vextracti64x4 $0x7b, %zmm30, -4128(%rdx)
+
 // CHECK: kunpckbw %k6, %k5, %k5
 // CHECK:  encoding: [0xc5,0xd5,0x4b,0xee]
           kunpckbw %k6, %k5, %k5

Modified: llvm/trunk/test/MC/X86/x86-64-avx512dq.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/X86/x86-64-avx512dq.s?rev=247149&r1=247148&r2=247149&view=diff
==============================================================================
--- llvm/trunk/test/MC/X86/x86-64-avx512dq.s (original)
+++ llvm/trunk/test/MC/X86/x86-64-avx512dq.s Wed Sep  9 09:35:09 2015
@@ -2371,6 +2371,390 @@
 // CHECK:  encoding: [0x62,0xa1,0xff,0xca,0x7a,0xd5]
           vcvtuqq2ps %zmm21, %ymm18 {%k2} {z}
 
+// CHECK: vextractf32x8 $171, %zmm18, %ymm21
+// CHECK:  encoding: [0x62,0xa3,0x7d,0x48,0x1b,0xd5,0xab]
+          vextractf32x8 $0xab, %zmm18, %ymm21
+
+// CHECK: vextractf32x8 $171, %zmm18, %ymm21 {%k1}
+// CHECK:  encoding: [0x62,0xa3,0x7d,0x49,0x1b,0xd5,0xab]
+          vextractf32x8 $0xab, %zmm18, %ymm21 {%k1}
+
+// CHECK: vextractf32x8 $171, %zmm18, %ymm21 {%k1} {z}
+// CHECK:  encoding: [0x62,0xa3,0x7d,0xc9,0x1b,0xd5,0xab]
+          vextractf32x8 $0xab, %zmm18, %ymm21 {%k1} {z}
+
+// CHECK: vextractf32x8 $123, %zmm18, %ymm21
+// CHECK:  encoding: [0x62,0xa3,0x7d,0x48,0x1b,0xd5,0x7b]
+          vextractf32x8 $0x7b, %zmm18, %ymm21
+
+// CHECK: vextractf32x8 $171, %zmm21, (%rcx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x48,0x1b,0x29,0xab]
+          vextractf32x8 $0xab, %zmm21,(%rcx)
+
+// CHECK: vextractf32x8 $171, %zmm21, (%rcx) {%k3}
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x4b,0x1b,0x29,0xab]
+          vextractf32x8 $0xab, %zmm21,(%rcx) {%k3}
+
+// CHECK: vextractf32x8 $123, %zmm21, (%rcx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x48,0x1b,0x29,0x7b]
+          vextractf32x8 $0x7b, %zmm21,(%rcx)
+
+// CHECK: vextractf32x8 $123, %zmm21, 291(%rax,%r14,8)
+// CHECK:  encoding: [0x62,0xa3,0x7d,0x48,0x1b,0xac,0xf0,0x23,0x01,0x00,0x00,0x7b]
+          vextractf32x8 $0x7b, %zmm21,291(%rax,%r14,8)
+
+// CHECK: vextractf32x8 $123, %zmm21, 4064(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x48,0x1b,0x6a,0x7f,0x7b]
+          vextractf32x8 $0x7b, %zmm21,4064(%rdx)
+
+// CHECK: vextractf32x8 $123, %zmm21, 4096(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x48,0x1b,0xaa,0x00,0x10,0x00,0x00,0x7b]
+          vextractf32x8 $0x7b, %zmm21,4096(%rdx)
+
+// CHECK: vextractf32x8 $123, %zmm21, -4096(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x48,0x1b,0x6a,0x80,0x7b]
+          vextractf32x8 $0x7b, %zmm21,-4096(%rdx)
+
+// CHECK: vextractf32x8 $123, %zmm21, -4128(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x48,0x1b,0xaa,0xe0,0xef,0xff,0xff,0x7b]
+          vextractf32x8 $0x7b, %zmm21,-4128(%rdx)
+
+// CHECK: vextractf32x8 $171, %zmm26, %ymm30
+// CHECK:  encoding: [0x62,0x03,0x7d,0x48,0x1b,0xd6,0xab]
+          vextractf32x8 $0xab, %zmm26, %ymm30
+
+// CHECK: vextractf32x8 $171, %zmm26, %ymm30 {%k3}
+// CHECK:  encoding: [0x62,0x03,0x7d,0x4b,0x1b,0xd6,0xab]
+          vextractf32x8 $0xab, %zmm26, %ymm30 {%k3}
+
+// CHECK: vextractf32x8 $171, %zmm26, %ymm30 {%k3} {z}
+// CHECK:  encoding: [0x62,0x03,0x7d,0xcb,0x1b,0xd6,0xab]
+          vextractf32x8 $0xab, %zmm26, %ymm30 {%k3} {z}
+
+// CHECK: vextractf32x8 $123, %zmm26, %ymm30
+// CHECK:  encoding: [0x62,0x03,0x7d,0x48,0x1b,0xd6,0x7b]
+          vextractf32x8 $0x7b, %zmm26, %ymm30
+
+// CHECK: vextractf32x8 $171, %zmm20, (%rcx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x48,0x1b,0x21,0xab]
+          vextractf32x8 $0xab, %zmm20,(%rcx)
+
+// CHECK: vextractf32x8 $171, %zmm20, (%rcx) {%k3}
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x4b,0x1b,0x21,0xab]
+          vextractf32x8 $0xab, %zmm20,(%rcx) {%k3}
+
+// CHECK: vextractf32x8 $123, %zmm20, (%rcx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x48,0x1b,0x21,0x7b]
+          vextractf32x8 $0x7b, %zmm20,(%rcx)
+
+// CHECK: vextractf32x8 $123, %zmm20, 4660(%rax,%r14,8)
+// CHECK:  encoding: [0x62,0xa3,0x7d,0x48,0x1b,0xa4,0xf0,0x34,0x12,0x00,0x00,0x7b]
+          vextractf32x8 $0x7b, %zmm20,4660(%rax,%r14,8)
+
+// CHECK: vextractf32x8 $123, %zmm20, 4064(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x48,0x1b,0x62,0x7f,0x7b]
+          vextractf32x8 $0x7b, %zmm20,4064(%rdx)
+
+// CHECK: vextractf32x8 $123, %zmm20, 4096(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x48,0x1b,0xa2,0x00,0x10,0x00,0x00,0x7b]
+          vextractf32x8 $0x7b, %zmm20,4096(%rdx)
+
+// CHECK: vextractf32x8 $123, %zmm20, -4096(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x48,0x1b,0x62,0x80,0x7b]
+          vextractf32x8 $0x7b, %zmm20,-4096(%rdx)
+
+// CHECK: vextractf32x8 $123, %zmm20, -4128(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x48,0x1b,0xa2,0xe0,0xef,0xff,0xff,0x7b]
+          vextractf32x8 $0x7b, %zmm20,-4128(%rdx)
+
+// CHECK: vextractf64x2 $171, %zmm26, %xmm28
+// CHECK:  encoding: [0x62,0x03,0xfd,0x48,0x19,0xd4,0xab]
+          vextractf64x2 $0xab, %zmm26, %xmm28
+
+// CHECK: vextractf64x2 $171, %zmm26, %xmm28 {%k5}
+// CHECK:  encoding: [0x62,0x03,0xfd,0x4d,0x19,0xd4,0xab]
+          vextractf64x2 $0xab, %zmm26, %xmm28 {%k5}
+
+// CHECK: vextractf64x2 $171, %zmm26, %xmm28 {%k5} {z}
+// CHECK:  encoding: [0x62,0x03,0xfd,0xcd,0x19,0xd4,0xab]
+          vextractf64x2 $0xab, %zmm26, %xmm28 {%k5} {z}
+
+// CHECK: vextractf64x2 $123, %zmm26, %xmm28
+// CHECK:  encoding: [0x62,0x03,0xfd,0x48,0x19,0xd4,0x7b]
+          vextractf64x2 $0x7b, %zmm26, %xmm28
+
+// CHECK: vextractf64x2 $171, %zmm28, (%rcx)
+// CHECK:  encoding: [0x62,0x63,0xfd,0x48,0x19,0x21,0xab]
+          vextractf64x2 $0xab, %zmm28,(%rcx)
+
+// CHECK: vextractf64x2 $171, %zmm28, (%rcx) {%k3}
+// CHECK:  encoding: [0x62,0x63,0xfd,0x4b,0x19,0x21,0xab]
+          vextractf64x2 $0xab, %zmm28,(%rcx) {%k3}
+
+// CHECK: vextractf64x2 $123, %zmm28, (%rcx)
+// CHECK:  encoding: [0x62,0x63,0xfd,0x48,0x19,0x21,0x7b]
+          vextractf64x2 $0x7b, %zmm28,(%rcx)
+
+// CHECK: vextractf64x2 $123, %zmm28, 291(%rax,%r14,8)
+// CHECK:  encoding: [0x62,0x23,0xfd,0x48,0x19,0xa4,0xf0,0x23,0x01,0x00,0x00,0x7b]
+          vextractf64x2 $0x7b, %zmm28,291(%rax,%r14,8)
+
+// CHECK: vextractf64x2 $123, %zmm28, 2032(%rdx)
+// CHECK:  encoding: [0x62,0x63,0xfd,0x48,0x19,0x62,0x7f,0x7b]
+          vextractf64x2 $0x7b, %zmm28,2032(%rdx)
+
+// CHECK: vextractf64x2 $123, %zmm28, 2048(%rdx)
+// CHECK:  encoding: [0x62,0x63,0xfd,0x48,0x19,0xa2,0x00,0x08,0x00,0x00,0x7b]
+          vextractf64x2 $0x7b, %zmm28,2048(%rdx)
+
+// CHECK: vextractf64x2 $123, %zmm28, -2048(%rdx)
+// CHECK:  encoding: [0x62,0x63,0xfd,0x48,0x19,0x62,0x80,0x7b]
+          vextractf64x2 $0x7b, %zmm28,-2048(%rdx)
+
+// CHECK: vextractf64x2 $123, %zmm28, -2064(%rdx)
+// CHECK:  encoding: [0x62,0x63,0xfd,0x48,0x19,0xa2,0xf0,0xf7,0xff,0xff,0x7b]
+          vextractf64x2 $0x7b, %zmm28,-2064(%rdx)
+
+// CHECK: vextractf64x2 $171, %zmm26, %xmm19
+// CHECK:  encoding: [0x62,0x23,0xfd,0x48,0x19,0xd3,0xab]
+          vextractf64x2 $0xab, %zmm26, %xmm19
+
+// CHECK: vextractf64x2 $171, %zmm26, %xmm19 {%k3}
+// CHECK:  encoding: [0x62,0x23,0xfd,0x4b,0x19,0xd3,0xab]
+          vextractf64x2 $0xab, %zmm26, %xmm19 {%k3}
+
+// CHECK: vextractf64x2 $171, %zmm26, %xmm19 {%k3} {z}
+// CHECK:  encoding: [0x62,0x23,0xfd,0xcb,0x19,0xd3,0xab]
+          vextractf64x2 $0xab, %zmm26, %xmm19 {%k3} {z}
+
+// CHECK: vextractf64x2 $123, %zmm26, %xmm19
+// CHECK:  encoding: [0x62,0x23,0xfd,0x48,0x19,0xd3,0x7b]
+          vextractf64x2 $0x7b, %zmm26, %xmm19
+
+// CHECK: vextractf64x2 $171, %zmm17, (%rcx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x48,0x19,0x09,0xab]
+          vextractf64x2 $0xab, %zmm17,(%rcx)
+
+// CHECK: vextractf64x2 $171, %zmm17, (%rcx) {%k1}
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x49,0x19,0x09,0xab]
+          vextractf64x2 $0xab, %zmm17,(%rcx) {%k1}
+
+// CHECK: vextractf64x2 $123, %zmm17, (%rcx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x48,0x19,0x09,0x7b]
+          vextractf64x2 $0x7b, %zmm17,(%rcx)
+
+// CHECK: vextractf64x2 $123, %zmm17, 4660(%rax,%r14,8)
+// CHECK:  encoding: [0x62,0xa3,0xfd,0x48,0x19,0x8c,0xf0,0x34,0x12,0x00,0x00,0x7b]
+          vextractf64x2 $0x7b, %zmm17,4660(%rax,%r14,8)
+
+// CHECK: vextractf64x2 $123, %zmm17, 2032(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x48,0x19,0x4a,0x7f,0x7b]
+          vextractf64x2 $0x7b, %zmm17,2032(%rdx)
+
+// CHECK: vextractf64x2 $123, %zmm17, 2048(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x48,0x19,0x8a,0x00,0x08,0x00,0x00,0x7b]
+          vextractf64x2 $0x7b, %zmm17,2048(%rdx)
+
+// CHECK: vextractf64x2 $123, %zmm17, -2048(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x48,0x19,0x4a,0x80,0x7b]
+          vextractf64x2 $0x7b, %zmm17,-2048(%rdx)
+
+// CHECK: vextractf64x2 $123, %zmm17, -2064(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x48,0x19,0x8a,0xf0,0xf7,0xff,0xff,0x7b]
+          vextractf64x2 $0x7b, %zmm17,-2064(%rdx)
+
+// CHECK: vextracti32x8 $171, %zmm24, %ymm20
+// CHECK:  encoding: [0x62,0x23,0x7d,0x48,0x3b,0xc4,0xab]
+          vextracti32x8 $0xab, %zmm24, %ymm20
+
+// CHECK: vextracti32x8 $171, %zmm24, %ymm20 {%k1}
+// CHECK:  encoding: [0x62,0x23,0x7d,0x49,0x3b,0xc4,0xab]
+          vextracti32x8 $0xab, %zmm24, %ymm20 {%k1}
+
+// CHECK: vextracti32x8 $171, %zmm24, %ymm20 {%k1} {z}
+// CHECK:  encoding: [0x62,0x23,0x7d,0xc9,0x3b,0xc4,0xab]
+          vextracti32x8 $0xab, %zmm24, %ymm20 {%k1} {z}
+
+// CHECK: vextracti32x8 $123, %zmm24, %ymm20
+// CHECK:  encoding: [0x62,0x23,0x7d,0x48,0x3b,0xc4,0x7b]
+          vextracti32x8 $0x7b, %zmm24, %ymm20
+
+// CHECK: vextracti32x8 $171, %zmm20, (%rcx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x48,0x3b,0x21,0xab]
+          vextracti32x8 $0xab, %zmm20,(%rcx)
+
+// CHECK: vextracti32x8 $171, %zmm20, (%rcx) {%k3}
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x4b,0x3b,0x21,0xab]
+          vextracti32x8 $0xab, %zmm20,(%rcx) {%k3}
+
+// CHECK: vextracti32x8 $123, %zmm20, (%rcx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x48,0x3b,0x21,0x7b]
+          vextracti32x8 $0x7b, %zmm20,(%rcx)
+
+// CHECK: vextracti32x8 $123, %zmm20, 291(%rax,%r14,8)
+// CHECK:  encoding: [0x62,0xa3,0x7d,0x48,0x3b,0xa4,0xf0,0x23,0x01,0x00,0x00,0x7b]
+          vextracti32x8 $0x7b, %zmm20,291(%rax,%r14,8)
+
+// CHECK: vextracti32x8 $123, %zmm20, 4064(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x48,0x3b,0x62,0x7f,0x7b]
+          vextracti32x8 $0x7b, %zmm20,4064(%rdx)
+
+// CHECK: vextracti32x8 $123, %zmm20, 4096(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x48,0x3b,0xa2,0x00,0x10,0x00,0x00,0x7b]
+          vextracti32x8 $0x7b, %zmm20,4096(%rdx)
+
+// CHECK: vextracti32x8 $123, %zmm20, -4096(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x48,0x3b,0x62,0x80,0x7b]
+          vextracti32x8 $0x7b, %zmm20,-4096(%rdx)
+
+// CHECK: vextracti32x8 $123, %zmm20, -4128(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x48,0x3b,0xa2,0xe0,0xef,0xff,0xff,0x7b]
+          vextracti32x8 $0x7b, %zmm20,-4128(%rdx)
+
+// CHECK: vextracti32x8 $171, %zmm29, %ymm27
+// CHECK:  encoding: [0x62,0x03,0x7d,0x48,0x3b,0xeb,0xab]
+          vextracti32x8 $0xab, %zmm29, %ymm27
+
+// CHECK: vextracti32x8 $171, %zmm29, %ymm27 {%k2}
+// CHECK:  encoding: [0x62,0x03,0x7d,0x4a,0x3b,0xeb,0xab]
+          vextracti32x8 $0xab, %zmm29, %ymm27 {%k2}
+
+// CHECK: vextracti32x8 $171, %zmm29, %ymm27 {%k2} {z}
+// CHECK:  encoding: [0x62,0x03,0x7d,0xca,0x3b,0xeb,0xab]
+          vextracti32x8 $0xab, %zmm29, %ymm27 {%k2} {z}
+
+// CHECK: vextracti32x8 $123, %zmm29, %ymm27
+// CHECK:  encoding: [0x62,0x03,0x7d,0x48,0x3b,0xeb,0x7b]
+          vextracti32x8 $0x7b, %zmm29, %ymm27
+
+// CHECK: vextracti32x8 $171, %zmm26, (%rcx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x48,0x3b,0x11,0xab]
+          vextracti32x8 $0xab, %zmm26,(%rcx)
+
+// CHECK: vextracti32x8 $171, %zmm26, (%rcx) {%k2}
+// CHECK:  encoding: [0x62,0x63,0x7d,0x4a,0x3b,0x11,0xab]
+          vextracti32x8 $0xab, %zmm26,(%rcx) {%k2}
+
+// CHECK: vextracti32x8 $123, %zmm26, (%rcx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x48,0x3b,0x11,0x7b]
+          vextracti32x8 $0x7b, %zmm26,(%rcx)
+
+// CHECK: vextracti32x8 $123, %zmm26, 4660(%rax,%r14,8)
+// CHECK:  encoding: [0x62,0x23,0x7d,0x48,0x3b,0x94,0xf0,0x34,0x12,0x00,0x00,0x7b]
+          vextracti32x8 $0x7b, %zmm26,4660(%rax,%r14,8)
+
+// CHECK: vextracti32x8 $123, %zmm26, 4064(%rdx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x48,0x3b,0x52,0x7f,0x7b]
+          vextracti32x8 $0x7b, %zmm26,4064(%rdx)
+
+// CHECK: vextracti32x8 $123, %zmm26, 4096(%rdx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x48,0x3b,0x92,0x00,0x10,0x00,0x00,0x7b]
+          vextracti32x8 $0x7b, %zmm26,4096(%rdx)
+
+// CHECK: vextracti32x8 $123, %zmm26, -4096(%rdx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x48,0x3b,0x52,0x80,0x7b]
+          vextracti32x8 $0x7b, %zmm26,-4096(%rdx)
+
+// CHECK: vextracti32x8 $123, %zmm26, -4128(%rdx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x48,0x3b,0x92,0xe0,0xef,0xff,0xff,0x7b]
+          vextracti32x8 $0x7b, %zmm26,-4128(%rdx)
+
+// CHECK: vextracti64x2 $171, %zmm20, %xmm17
+// CHECK:  encoding: [0x62,0xa3,0xfd,0x48,0x39,0xe1,0xab]
+          vextracti64x2 $0xab, %zmm20, %xmm17
+
+// CHECK: vextracti64x2 $171, %zmm20, %xmm17 {%k2}
+// CHECK:  encoding: [0x62,0xa3,0xfd,0x4a,0x39,0xe1,0xab]
+          vextracti64x2 $0xab, %zmm20, %xmm17 {%k2}
+
+// CHECK: vextracti64x2 $171, %zmm20, %xmm17 {%k2} {z}
+// CHECK:  encoding: [0x62,0xa3,0xfd,0xca,0x39,0xe1,0xab]
+          vextracti64x2 $0xab, %zmm20, %xmm17 {%k2} {z}
+
+// CHECK: vextracti64x2 $123, %zmm20, %xmm17
+// CHECK:  encoding: [0x62,0xa3,0xfd,0x48,0x39,0xe1,0x7b]
+          vextracti64x2 $0x7b, %zmm20, %xmm17
+
+// CHECK: vextracti64x2 $171, %zmm17, (%rcx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x48,0x39,0x09,0xab]
+          vextracti64x2 $0xab, %zmm17,(%rcx)
+
+// CHECK: vextracti64x2 $171, %zmm17, (%rcx) {%k5}
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x4d,0x39,0x09,0xab]
+          vextracti64x2 $0xab, %zmm17,(%rcx) {%k5}
+
+// CHECK: vextracti64x2 $123, %zmm17, (%rcx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x48,0x39,0x09,0x7b]
+          vextracti64x2 $0x7b, %zmm17,(%rcx)
+
+// CHECK: vextracti64x2 $123, %zmm17, 291(%rax,%r14,8)
+// CHECK:  encoding: [0x62,0xa3,0xfd,0x48,0x39,0x8c,0xf0,0x23,0x01,0x00,0x00,0x7b]
+          vextracti64x2 $0x7b, %zmm17,291(%rax,%r14,8)
+
+// CHECK: vextracti64x2 $123, %zmm17, 2032(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x48,0x39,0x4a,0x7f,0x7b]
+          vextracti64x2 $0x7b, %zmm17,2032(%rdx)
+
+// CHECK: vextracti64x2 $123, %zmm17, 2048(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x48,0x39,0x8a,0x00,0x08,0x00,0x00,0x7b]
+          vextracti64x2 $0x7b, %zmm17,2048(%rdx)
+
+// CHECK: vextracti64x2 $123, %zmm17, -2048(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x48,0x39,0x4a,0x80,0x7b]
+          vextracti64x2 $0x7b, %zmm17,-2048(%rdx)
+
+// CHECK: vextracti64x2 $123, %zmm17, -2064(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x48,0x39,0x8a,0xf0,0xf7,0xff,0xff,0x7b]
+          vextracti64x2 $0x7b, %zmm17,-2064(%rdx)
+
+// CHECK: vextracti64x2 $171, %zmm23, %xmm27
+// CHECK:  encoding: [0x62,0x83,0xfd,0x48,0x39,0xfb,0xab]
+          vextracti64x2 $0xab, %zmm23, %xmm27
+
+// CHECK: vextracti64x2 $171, %zmm23, %xmm27 {%k5}
+// CHECK:  encoding: [0x62,0x83,0xfd,0x4d,0x39,0xfb,0xab]
+          vextracti64x2 $0xab, %zmm23, %xmm27 {%k5}
+
+// CHECK: vextracti64x2 $171, %zmm23, %xmm27 {%k5} {z}
+// CHECK:  encoding: [0x62,0x83,0xfd,0xcd,0x39,0xfb,0xab]
+          vextracti64x2 $0xab, %zmm23, %xmm27 {%k5} {z}
+
+// CHECK: vextracti64x2 $123, %zmm23, %xmm27
+// CHECK:  encoding: [0x62,0x83,0xfd,0x48,0x39,0xfb,0x7b]
+          vextracti64x2 $0x7b, %zmm23, %xmm27
+
+// CHECK: vextracti64x2 $171, %zmm24, (%rcx)
+// CHECK:  encoding: [0x62,0x63,0xfd,0x48,0x39,0x01,0xab]
+          vextracti64x2 $0xab, %zmm24,(%rcx)
+
+// CHECK: vextracti64x2 $171, %zmm24, (%rcx) {%k3}
+// CHECK:  encoding: [0x62,0x63,0xfd,0x4b,0x39,0x01,0xab]
+          vextracti64x2 $0xab, %zmm24,(%rcx) {%k3}
+
+// CHECK: vextracti64x2 $123, %zmm24, (%rcx)
+// CHECK:  encoding: [0x62,0x63,0xfd,0x48,0x39,0x01,0x7b]
+          vextracti64x2 $0x7b, %zmm24,(%rcx)
+
+// CHECK: vextracti64x2 $123, %zmm24, 4660(%rax,%r14,8)
+// CHECK:  encoding: [0x62,0x23,0xfd,0x48,0x39,0x84,0xf0,0x34,0x12,0x00,0x00,0x7b]
+          vextracti64x2 $0x7b, %zmm24,4660(%rax,%r14,8)
+
+// CHECK: vextracti64x2 $123, %zmm24, 2032(%rdx)
+// CHECK:  encoding: [0x62,0x63,0xfd,0x48,0x39,0x42,0x7f,0x7b]
+          vextracti64x2 $0x7b, %zmm24,2032(%rdx)
+
+// CHECK: vextracti64x2 $123, %zmm24, 2048(%rdx)
+// CHECK:  encoding: [0x62,0x63,0xfd,0x48,0x39,0x82,0x00,0x08,0x00,0x00,0x7b]
+          vextracti64x2 $0x7b, %zmm24,2048(%rdx)
+
+// CHECK: vextracti64x2 $123, %zmm24, -2048(%rdx)
+// CHECK:  encoding: [0x62,0x63,0xfd,0x48,0x39,0x42,0x80,0x7b]
+          vextracti64x2 $0x7b, %zmm24,-2048(%rdx)
+
+// CHECK: vextracti64x2 $123, %zmm24, -2064(%rdx)
+// CHECK:  encoding: [0x62,0x63,0xfd,0x48,0x39,0x82,0xf0,0xf7,0xff,0xff,0x7b]
+          vextracti64x2 $0x7b, %zmm24,-2064(%rdx)
+
 // CHECK: ktestb %k6, %k4
 // CHECK:  encoding: [0xc5,0xf9,0x99,0xe6]
           ktestb %k6, %k4

Modified: llvm/trunk/test/MC/X86/x86-64-avx512dq_vl.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/X86/x86-64-avx512dq_vl.s?rev=247149&r1=247148&r2=247149&view=diff
==============================================================================
--- llvm/trunk/test/MC/X86/x86-64-avx512dq_vl.s (original)
+++ llvm/trunk/test/MC/X86/x86-64-avx512dq_vl.s Wed Sep  9 09:35:09 2015
@@ -3584,3 +3584,195 @@
 // CHECK:  encoding: [0x62,0x61,0xff,0x38,0x7a,0xa2,0xf8,0xfb,0xff,0xff]
           vcvtuqq2ps -1032(%rdx){1to4}, %xmm28
 
+// CHECK: vextractf64x2 $171, %ymm21, %xmm27
+// CHECK:  encoding: [0x62,0x83,0xfd,0x28,0x19,0xeb,0xab]
+          vextractf64x2 $0xab, %ymm21, %xmm27
+
+// CHECK: vextractf64x2 $171, %ymm21, %xmm27 {%k7}
+// CHECK:  encoding: [0x62,0x83,0xfd,0x2f,0x19,0xeb,0xab]
+          vextractf64x2 $0xab, %ymm21, %xmm27 {%k7}
+
+// CHECK: vextractf64x2 $171, %ymm21, %xmm27 {%k7} {z}
+// CHECK:  encoding: [0x62,0x83,0xfd,0xaf,0x19,0xeb,0xab]
+          vextractf64x2 $0xab, %ymm21, %xmm27 {%k7} {z}
+
+// CHECK: vextractf64x2 $123, %ymm21, %xmm27
+// CHECK:  encoding: [0x62,0x83,0xfd,0x28,0x19,0xeb,0x7b]
+          vextractf64x2 $0x7b, %ymm21, %xmm27
+
+// CHECK: vextractf64x2 $171, %ymm20, (%rcx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x28,0x19,0x21,0xab]
+          vextractf64x2 $0xab, %ymm20,(%rcx)
+
+// CHECK: vextractf64x2 $171, %ymm20, (%rcx) {%k1}
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x29,0x19,0x21,0xab]
+          vextractf64x2 $0xab, %ymm20,(%rcx) {%k1}
+
+// CHECK: vextractf64x2 $123, %ymm20, (%rcx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x28,0x19,0x21,0x7b]
+          vextractf64x2 $0x7b, %ymm20,(%rcx)
+
+// CHECK: vextractf64x2 $123, %ymm20, 291(%rax,%r14,8)
+// CHECK:  encoding: [0x62,0xa3,0xfd,0x28,0x19,0xa4,0xf0,0x23,0x01,0x00,0x00,0x7b]
+          vextractf64x2 $0x7b, %ymm20,291(%rax,%r14,8)
+
+// CHECK: vextractf64x2 $123, %ymm20, 2032(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x28,0x19,0x62,0x7f,0x7b]
+          vextractf64x2 $0x7b, %ymm20,2032(%rdx)
+
+// CHECK: vextractf64x2 $123, %ymm20, 2048(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x28,0x19,0xa2,0x00,0x08,0x00,0x00,0x7b]
+          vextractf64x2 $0x7b, %ymm20,2048(%rdx)
+
+// CHECK: vextractf64x2 $123, %ymm20, -2048(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x28,0x19,0x62,0x80,0x7b]
+          vextractf64x2 $0x7b, %ymm20,-2048(%rdx)
+
+// CHECK: vextractf64x2 $123, %ymm20, -2064(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x28,0x19,0xa2,0xf0,0xf7,0xff,0xff,0x7b]
+          vextractf64x2 $0x7b, %ymm20,-2064(%rdx)
+
+// CHECK: vextractf64x2 $171, %ymm26, %xmm28
+// CHECK:  encoding: [0x62,0x03,0xfd,0x28,0x19,0xd4,0xab]
+          vextractf64x2 $0xab, %ymm26, %xmm28
+
+// CHECK: vextractf64x2 $171, %ymm26, %xmm28 {%k4}
+// CHECK:  encoding: [0x62,0x03,0xfd,0x2c,0x19,0xd4,0xab]
+          vextractf64x2 $0xab, %ymm26, %xmm28 {%k4}
+
+// CHECK: vextractf64x2 $171, %ymm26, %xmm28 {%k4} {z}
+// CHECK:  encoding: [0x62,0x03,0xfd,0xac,0x19,0xd4,0xab]
+          vextractf64x2 $0xab, %ymm26, %xmm28 {%k4} {z}
+
+// CHECK: vextractf64x2 $123, %ymm26, %xmm28
+// CHECK:  encoding: [0x62,0x03,0xfd,0x28,0x19,0xd4,0x7b]
+          vextractf64x2 $0x7b, %ymm26, %xmm28
+
+// CHECK: vextractf64x2 $171, %ymm17, (%rcx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x28,0x19,0x09,0xab]
+          vextractf64x2 $0xab, %ymm17,(%rcx)
+
+// CHECK: vextractf64x2 $171, %ymm17, (%rcx) {%k2}
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x2a,0x19,0x09,0xab]
+          vextractf64x2 $0xab, %ymm17,(%rcx) {%k2}
+
+// CHECK: vextractf64x2 $123, %ymm17, (%rcx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x28,0x19,0x09,0x7b]
+          vextractf64x2 $0x7b, %ymm17,(%rcx)
+
+// CHECK: vextractf64x2 $123, %ymm17, 4660(%rax,%r14,8)
+// CHECK:  encoding: [0x62,0xa3,0xfd,0x28,0x19,0x8c,0xf0,0x34,0x12,0x00,0x00,0x7b]
+          vextractf64x2 $0x7b, %ymm17,4660(%rax,%r14,8)
+
+// CHECK: vextractf64x2 $123, %ymm17, 2032(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x28,0x19,0x4a,0x7f,0x7b]
+          vextractf64x2 $0x7b, %ymm17,2032(%rdx)
+
+// CHECK: vextractf64x2 $123, %ymm17, 2048(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x28,0x19,0x8a,0x00,0x08,0x00,0x00,0x7b]
+          vextractf64x2 $0x7b, %ymm17,2048(%rdx)
+
+// CHECK: vextractf64x2 $123, %ymm17, -2048(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x28,0x19,0x4a,0x80,0x7b]
+          vextractf64x2 $0x7b, %ymm17,-2048(%rdx)
+
+// CHECK: vextractf64x2 $123, %ymm17, -2064(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x28,0x19,0x8a,0xf0,0xf7,0xff,0xff,0x7b]
+          vextractf64x2 $0x7b, %ymm17,-2064(%rdx)
+
+// CHECK: vextracti64x2 $171, %ymm24, %xmm29
+// CHECK:  encoding: [0x62,0x03,0xfd,0x28,0x39,0xc5,0xab]
+          vextracti64x2 $0xab, %ymm24, %xmm29
+
+// CHECK: vextracti64x2 $171, %ymm24, %xmm29 {%k7}
+// CHECK:  encoding: [0x62,0x03,0xfd,0x2f,0x39,0xc5,0xab]
+          vextracti64x2 $0xab, %ymm24, %xmm29 {%k7}
+
+// CHECK: vextracti64x2 $171, %ymm24, %xmm29 {%k7} {z}
+// CHECK:  encoding: [0x62,0x03,0xfd,0xaf,0x39,0xc5,0xab]
+          vextracti64x2 $0xab, %ymm24, %xmm29 {%k7} {z}
+
+// CHECK: vextracti64x2 $123, %ymm24, %xmm29
+// CHECK:  encoding: [0x62,0x03,0xfd,0x28,0x39,0xc5,0x7b]
+          vextracti64x2 $0x7b, %ymm24, %xmm29
+
+// CHECK: vextracti64x2 $171, %ymm17, (%rcx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x28,0x39,0x09,0xab]
+          vextracti64x2 $0xab, %ymm17,(%rcx)
+
+// CHECK: vextracti64x2 $171, %ymm17, (%rcx) {%k1}
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x29,0x39,0x09,0xab]
+          vextracti64x2 $0xab, %ymm17,(%rcx) {%k1}
+
+// CHECK: vextracti64x2 $123, %ymm17, (%rcx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x28,0x39,0x09,0x7b]
+          vextracti64x2 $0x7b, %ymm17,(%rcx)
+
+// CHECK: vextracti64x2 $123, %ymm17, 291(%rax,%r14,8)
+// CHECK:  encoding: [0x62,0xa3,0xfd,0x28,0x39,0x8c,0xf0,0x23,0x01,0x00,0x00,0x7b]
+          vextracti64x2 $0x7b, %ymm17,291(%rax,%r14,8)
+
+// CHECK: vextracti64x2 $123, %ymm17, 2032(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x28,0x39,0x4a,0x7f,0x7b]
+          vextracti64x2 $0x7b, %ymm17,2032(%rdx)
+
+// CHECK: vextracti64x2 $123, %ymm17, 2048(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x28,0x39,0x8a,0x00,0x08,0x00,0x00,0x7b]
+          vextracti64x2 $0x7b, %ymm17,2048(%rdx)
+
+// CHECK: vextracti64x2 $123, %ymm17, -2048(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x28,0x39,0x4a,0x80,0x7b]
+          vextracti64x2 $0x7b, %ymm17,-2048(%rdx)
+
+// CHECK: vextracti64x2 $123, %ymm17, -2064(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x28,0x39,0x8a,0xf0,0xf7,0xff,0xff,0x7b]
+          vextracti64x2 $0x7b, %ymm17,-2064(%rdx)
+
+// CHECK: vextracti64x2 $171, %ymm17, %xmm29
+// CHECK:  encoding: [0x62,0x83,0xfd,0x28,0x39,0xcd,0xab]
+          vextracti64x2 $0xab, %ymm17, %xmm29
+
+// CHECK: vextracti64x2 $171, %ymm17, %xmm29 {%k5}
+// CHECK:  encoding: [0x62,0x83,0xfd,0x2d,0x39,0xcd,0xab]
+          vextracti64x2 $0xab, %ymm17, %xmm29 {%k5}
+
+// CHECK: vextracti64x2 $171, %ymm17, %xmm29 {%k5} {z}
+// CHECK:  encoding: [0x62,0x83,0xfd,0xad,0x39,0xcd,0xab]
+          vextracti64x2 $0xab, %ymm17, %xmm29 {%k5} {z}
+
+// CHECK: vextracti64x2 $123, %ymm17, %xmm29
+// CHECK:  encoding: [0x62,0x83,0xfd,0x28,0x39,0xcd,0x7b]
+          vextracti64x2 $0x7b, %ymm17, %xmm29
+
+// CHECK: vextracti64x2 $171, %ymm20, (%rcx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x28,0x39,0x21,0xab]
+          vextracti64x2 $0xab, %ymm20,(%rcx)
+
+// CHECK: vextracti64x2 $171, %ymm20, (%rcx) {%k2}
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x2a,0x39,0x21,0xab]
+          vextracti64x2 $0xab, %ymm20,(%rcx) {%k2}
+
+// CHECK: vextracti64x2 $123, %ymm20, (%rcx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x28,0x39,0x21,0x7b]
+          vextracti64x2 $0x7b, %ymm20,(%rcx)
+
+// CHECK: vextracti64x2 $123, %ymm20, 4660(%rax,%r14,8)
+// CHECK:  encoding: [0x62,0xa3,0xfd,0x28,0x39,0xa4,0xf0,0x34,0x12,0x00,0x00,0x7b]
+          vextracti64x2 $0x7b, %ymm20,4660(%rax,%r14,8)
+
+// CHECK: vextracti64x2 $123, %ymm20, 2032(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x28,0x39,0x62,0x7f,0x7b]
+          vextracti64x2 $0x7b, %ymm20,2032(%rdx)
+
+// CHECK: vextracti64x2 $123, %ymm20, 2048(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x28,0x39,0xa2,0x00,0x08,0x00,0x00,0x7b]
+          vextracti64x2 $0x7b, %ymm20,2048(%rdx)
+
+// CHECK: vextracti64x2 $123, %ymm20, -2048(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x28,0x39,0x62,0x80,0x7b]
+          vextracti64x2 $0x7b, %ymm20,-2048(%rdx)
+
+// CHECK: vextracti64x2 $123, %ymm20, -2064(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x28,0x39,0xa2,0xf0,0xf7,0xff,0xff,0x7b]
+          vextracti64x2 $0x7b, %ymm20,-2064(%rdx)
+

Modified: llvm/trunk/test/MC/X86/x86-64-avx512f_vl.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/X86/x86-64-avx512f_vl.s?rev=247149&r1=247148&r2=247149&view=diff
==============================================================================
--- llvm/trunk/test/MC/X86/x86-64-avx512f_vl.s (original)
+++ llvm/trunk/test/MC/X86/x86-64-avx512f_vl.s Wed Sep  9 09:35:09 2015
@@ -19739,6 +19739,102 @@ vaddpd  {rz-sae}, %zmm2, %zmm1, %zmm1
 // CHECK:  encoding: [0x62,0xe1,0xe5,0x30,0x6d,0xa2,0xf8,0xfb,0xff,0xff]
           vpunpckhqdq -1032(%rdx){1to4}, %ymm19, %ymm20
 
+// CHECK: vextractf32x4 $171, %ymm17, %xmm28
+// CHECK:  encoding: [0x62,0x83,0x7d,0x28,0x19,0xcc,0xab]
+          vextractf32x4 $0xab, %ymm17, %xmm28
+
+// CHECK: vextractf32x4 $171, %ymm17, %xmm28 {%k6}
+// CHECK:  encoding: [0x62,0x83,0x7d,0x2e,0x19,0xcc,0xab]
+          vextractf32x4 $0xab, %ymm17, %xmm28 {%k6}
+
+// CHECK: vextractf32x4 $171, %ymm17, %xmm28 {%k6} {z}
+// CHECK:  encoding: [0x62,0x83,0x7d,0xae,0x19,0xcc,0xab]
+          vextractf32x4 $0xab, %ymm17, %xmm28 {%k6} {z}
+
+// CHECK: vextractf32x4 $123, %ymm17, %xmm28
+// CHECK:  encoding: [0x62,0x83,0x7d,0x28,0x19,0xcc,0x7b]
+          vextractf32x4 $0x7b, %ymm17, %xmm28
+
+// CHECK: vextractf32x4 $171, %ymm20, (%rcx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x28,0x19,0x21,0xab]
+          vextractf32x4 $0xab, %ymm20, (%rcx)
+
+// CHECK: vextractf32x4 $171, %ymm20, (%rcx) {%k2}
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x2a,0x19,0x21,0xab]
+          vextractf32x4 $0xab, %ymm20, (%rcx) {%k2}
+
+// CHECK: vextractf32x4 $123, %ymm20, (%rcx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x28,0x19,0x21,0x7b]
+          vextractf32x4 $0x7b, %ymm20, (%rcx)
+
+// CHECK: vextractf32x4 $123, %ymm20, 291(%rax,%r14,8)
+// CHECK:  encoding: [0x62,0xa3,0x7d,0x28,0x19,0xa4,0xf0,0x23,0x01,0x00,0x00,0x7b]
+          vextractf32x4 $0x7b, %ymm20, 291(%rax,%r14,8)
+
+// CHECK: vextractf32x4 $123, %ymm20, 2032(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x28,0x19,0x62,0x7f,0x7b]
+          vextractf32x4 $0x7b, %ymm20, 2032(%rdx)
+
+// CHECK: vextractf32x4 $123, %ymm20, 2048(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x28,0x19,0xa2,0x00,0x08,0x00,0x00,0x7b]
+          vextractf32x4 $0x7b, %ymm20, 2048(%rdx)
+
+// CHECK: vextractf32x4 $123, %ymm20, -2048(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x28,0x19,0x62,0x80,0x7b]
+          vextractf32x4 $0x7b, %ymm20, -2048(%rdx)
+
+// CHECK: vextractf32x4 $123, %ymm20, -2064(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x28,0x19,0xa2,0xf0,0xf7,0xff,0xff,0x7b]
+          vextractf32x4 $0x7b, %ymm20, -2064(%rdx)
+
+// CHECK: vextracti32x4 $171, %ymm21, %xmm20
+// CHECK:  encoding: [0x62,0xa3,0x7d,0x28,0x39,0xec,0xab]
+          vextracti32x4 $0xab, %ymm21, %xmm20
+
+// CHECK: vextracti32x4 $171, %ymm21, %xmm20 {%k4}
+// CHECK:  encoding: [0x62,0xa3,0x7d,0x2c,0x39,0xec,0xab]
+          vextracti32x4 $0xab, %ymm21, %xmm20 {%k4}
+
+// CHECK: vextracti32x4 $171, %ymm21, %xmm20 {%k4} {z}
+// CHECK:  encoding: [0x62,0xa3,0x7d,0xac,0x39,0xec,0xab]
+          vextracti32x4 $0xab, %ymm21, %xmm20 {%k4} {z}
+
+// CHECK: vextracti32x4 $123, %ymm21, %xmm20
+// CHECK:  encoding: [0x62,0xa3,0x7d,0x28,0x39,0xec,0x7b]
+          vextracti32x4 $0x7b, %ymm21, %xmm20
+
+// CHECK: vextracti32x4 $171, %ymm28, (%rcx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x28,0x39,0x21,0xab]
+          vextracti32x4 $0xab, %ymm28, (%rcx)
+
+// CHECK: vextracti32x4 $171, %ymm28, (%rcx) {%k6}
+// CHECK:  encoding: [0x62,0x63,0x7d,0x2e,0x39,0x21,0xab]
+          vextracti32x4 $0xab, %ymm28, (%rcx) {%k6}
+
+// CHECK: vextracti32x4 $123, %ymm28, (%rcx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x28,0x39,0x21,0x7b]
+          vextracti32x4 $0x7b, %ymm28, (%rcx)
+
+// CHECK: vextracti32x4 $123, %ymm28, 291(%rax,%r14,8)
+// CHECK:  encoding: [0x62,0x23,0x7d,0x28,0x39,0xa4,0xf0,0x23,0x01,0x00,0x00,0x7b]
+          vextracti32x4 $0x7b, %ymm28, 291(%rax,%r14,8)
+
+// CHECK: vextracti32x4 $123, %ymm28, 2032(%rdx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x28,0x39,0x62,0x7f,0x7b]
+          vextracti32x4 $0x7b, %ymm28, 2032(%rdx)
+
+// CHECK: vextracti32x4 $123, %ymm28, 2048(%rdx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x28,0x39,0xa2,0x00,0x08,0x00,0x00,0x7b]
+          vextracti32x4 $0x7b, %ymm28, 2048(%rdx)
+
+// CHECK: vextracti32x4 $123, %ymm28, -2048(%rdx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x28,0x39,0x62,0x80,0x7b]
+          vextracti32x4 $0x7b, %ymm28, -2048(%rdx)
+
+// CHECK: vextracti32x4 $123, %ymm28, -2064(%rdx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x28,0x39,0xa2,0xf0,0xf7,0xff,0xff,0x7b]
+          vextracti32x4 $0x7b, %ymm28, -2064(%rdx)
+
 // CHECK: vgetmantps $171, %xmm23, %xmm29
 // CHECK:  encoding: [0x62,0x23,0x7d,0x08,0x26,0xef,0xab]
           vgetmantps $0xab, %xmm23, %xmm29

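For reference: each pair of "// CHECK:" lines added above asserts both the canonical printing of the instruction and its byte encoding. These MC tests are verified by assembling the file with llvm-mc in --show-encoding mode and piping the output through FileCheck; the RUN line sits outside these hunks, so the exact target attributes below are an assumption, but a typical invocation looks roughly like:

  llvm-mc -triple x86_64-unknown-unknown -mattr=+avx512f,+avx512dq,+avx512vl --show-encoding x86-64-avx512dq_vl.s | FileCheck x86-64-avx512dq_vl.s

FileCheck then matches each "# encoding: [...]" comment that llvm-mc emits against the corresponding "// CHECK:  encoding:" directive, so any change to the emitted opcode, EVEX prefix bits, or displacement compression shows up as a test failure.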
