[llvm] r360085 - [X86] Remove the suffix on vcvt[u]si2ss/sd register variants in assembly printing.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Mon May 6 14:39:51 PDT 2019


Author: ctopper
Date: Mon May  6 14:39:51 2019
New Revision: 360085

URL: http://llvm.org/viewvc/llvm-project?rev=360085&view=rev
Log:
[X86] Remove the suffix on vcvt[u]si2ss/sd register variants in assembly printing.

We require d/q suffixes on the memory form of these instructions to disambiguate the memory size.
We don't require it on the register forms, but need to support parsing both with and without it.

Previously we always printed the d/q suffix on the register forms, but it's redundant and
inconsistent with gcc and objdump.

After this patch we should support the d/q suffixes for parsing, but not print them when they are unneeded.

Modified:
    llvm/trunk/lib/Target/X86/X86InstrAVX512.td
    llvm/trunk/lib/Target/X86/X86InstrSSE.td
    llvm/trunk/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll
    llvm/trunk/test/CodeGen/X86/avx512-cvt-widen.ll
    llvm/trunk/test/CodeGen/X86/avx512-cvt.ll
    llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
    llvm/trunk/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
    llvm/trunk/test/CodeGen/X86/avx512-intrinsics-x86_64.ll
    llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll
    llvm/trunk/test/CodeGen/X86/avx512-regcall-NoMask.ll
    llvm/trunk/test/CodeGen/X86/break-false-dep.ll
    llvm/trunk/test/CodeGen/X86/copy-propagation.ll
    llvm/trunk/test/CodeGen/X86/cvtv2f32.ll
    llvm/trunk/test/CodeGen/X86/fast-isel-int-float-conversion-x86-64.ll
    llvm/trunk/test/CodeGen/X86/fast-isel-int-float-conversion.ll
    llvm/trunk/test/CodeGen/X86/fast-isel-uint-float-conversion-x86-64.ll
    llvm/trunk/test/CodeGen/X86/fast-isel-uint-float-conversion.ll
    llvm/trunk/test/CodeGen/X86/ftrunc.ll
    llvm/trunk/test/CodeGen/X86/half.ll
    llvm/trunk/test/CodeGen/X86/known-bits-vector.ll
    llvm/trunk/test/CodeGen/X86/known-signbits-vector.ll
    llvm/trunk/test/CodeGen/X86/pr37879.ll
    llvm/trunk/test/CodeGen/X86/scalar-int-to-fp.ll
    llvm/trunk/test/CodeGen/X86/sse-cvttp2si.ll
    llvm/trunk/test/CodeGen/X86/sse-intrinsics-fast-isel-x86_64.ll
    llvm/trunk/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
    llvm/trunk/test/CodeGen/X86/sse-intrinsics-x86-upgrade.ll
    llvm/trunk/test/CodeGen/X86/sse-intrinsics-x86_64-upgrade.ll
    llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel-x86_64.ll
    llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
    llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
    llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86_64-upgrade.ll
    llvm/trunk/test/CodeGen/X86/uint64-to-float.ll
    llvm/trunk/test/CodeGen/X86/uint_to_fp.ll
    llvm/trunk/test/CodeGen/X86/vec_int_to_fp-widen.ll
    llvm/trunk/test/CodeGen/X86/vec_int_to_fp.ll
    llvm/trunk/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll
    llvm/trunk/test/MC/Disassembler/X86/x86-64.txt
    llvm/trunk/test/MC/X86/AVX-64.s
    llvm/trunk/test/MC/X86/AVX512F_SCALAR-64.s
    llvm/trunk/test/MC/X86/SSE-64.s
    llvm/trunk/test/MC/X86/SSE2-64.s
    llvm/trunk/test/MC/X86/avx512-encodings.s
    llvm/trunk/test/MC/X86/x86-32-coverage.s
    llvm/trunk/test/MC/X86/x86_64-avx-encoding.s
    llvm/trunk/test/tools/llvm-mca/X86/Atom/resources-sse1.s
    llvm/trunk/test/tools/llvm-mca/X86/Atom/resources-sse2.s
    llvm/trunk/test/tools/llvm-mca/X86/BdVer2/int-to-fpu-forwarding-2.s
    llvm/trunk/test/tools/llvm-mca/X86/BdVer2/resources-avx1.s
    llvm/trunk/test/tools/llvm-mca/X86/BdVer2/resources-sse1.s
    llvm/trunk/test/tools/llvm-mca/X86/BdVer2/resources-sse2.s
    llvm/trunk/test/tools/llvm-mca/X86/Broadwell/resources-avx1.s
    llvm/trunk/test/tools/llvm-mca/X86/Broadwell/resources-sse1.s
    llvm/trunk/test/tools/llvm-mca/X86/Broadwell/resources-sse2.s
    llvm/trunk/test/tools/llvm-mca/X86/BtVer2/int-to-fpu-forwarding-2.s
    llvm/trunk/test/tools/llvm-mca/X86/BtVer2/resources-avx1.s
    llvm/trunk/test/tools/llvm-mca/X86/BtVer2/resources-sse1.s
    llvm/trunk/test/tools/llvm-mca/X86/BtVer2/resources-sse2.s
    llvm/trunk/test/tools/llvm-mca/X86/Generic/resources-avx1.s
    llvm/trunk/test/tools/llvm-mca/X86/Generic/resources-sse1.s
    llvm/trunk/test/tools/llvm-mca/X86/Generic/resources-sse2.s
    llvm/trunk/test/tools/llvm-mca/X86/Haswell/resources-avx1.s
    llvm/trunk/test/tools/llvm-mca/X86/Haswell/resources-sse1.s
    llvm/trunk/test/tools/llvm-mca/X86/Haswell/resources-sse2.s
    llvm/trunk/test/tools/llvm-mca/X86/SLM/resources-sse1.s
    llvm/trunk/test/tools/llvm-mca/X86/SLM/resources-sse2.s
    llvm/trunk/test/tools/llvm-mca/X86/SandyBridge/resources-avx1.s
    llvm/trunk/test/tools/llvm-mca/X86/SandyBridge/resources-sse1.s
    llvm/trunk/test/tools/llvm-mca/X86/SandyBridge/resources-sse2.s
    llvm/trunk/test/tools/llvm-mca/X86/SkylakeClient/resources-avx1.s
    llvm/trunk/test/tools/llvm-mca/X86/SkylakeClient/resources-sse1.s
    llvm/trunk/test/tools/llvm-mca/X86/SkylakeClient/resources-sse2.s
    llvm/trunk/test/tools/llvm-mca/X86/SkylakeServer/resources-avx1.s
    llvm/trunk/test/tools/llvm-mca/X86/SkylakeServer/resources-sse1.s
    llvm/trunk/test/tools/llvm-mca/X86/SkylakeServer/resources-sse2.s
    llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-avx1.s
    llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-sse1.s
    llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-sse2.s

Modified: llvm/trunk/lib/Target/X86/X86InstrAVX512.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrAVX512.td?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrAVX512.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrAVX512.td Mon May  6 14:39:51 2019
@@ -7191,7 +7191,8 @@ defm VPMADD52HUQ : avx512_pmadd52_common
 
 multiclass avx512_vcvtsi<bits<8> opc, SDPatternOperator OpNode, X86FoldableSchedWrite sched,
                     RegisterClass SrcRC, X86VectorVTInfo DstVT,
-                    X86MemOperand x86memop, PatFrag ld_frag, string asm> {
+                    X86MemOperand x86memop, PatFrag ld_frag, string asm,
+                    string mem> {
   let hasSideEffects = 0, isCodeGenOnly = 1 in {
     def rr : SI<opc, MRMSrcReg, (outs DstVT.FRC:$dst),
               (ins DstVT.FRC:$src1, SrcRC:$src),
@@ -7200,7 +7201,7 @@ multiclass avx512_vcvtsi<bits<8> opc, SD
     let mayLoad = 1 in
       def rm : SI<opc, MRMSrcMem, (outs DstVT.FRC:$dst),
               (ins DstVT.FRC:$src1, x86memop:$src),
-              !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
+              asm#"{"#mem#"}\t{$src, $src1, $dst|$dst, $src1, $src}", []>,
               EVEX_4V, Sched<[sched.Folded, sched.ReadAfterFold]>;
   } // hasSideEffects = 0
   def rr_Int : SI<opc, MRMSrcReg, (outs DstVT.RC:$dst),
@@ -7212,16 +7213,20 @@ multiclass avx512_vcvtsi<bits<8> opc, SD
 
   def rm_Int : SI<opc, MRMSrcMem, (outs DstVT.RC:$dst),
                 (ins DstVT.RC:$src1, x86memop:$src2),
-                !strconcat(asm,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+                asm#"{"#mem#"}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                 [(set DstVT.RC:$dst,
                       (OpNode (DstVT.VT DstVT.RC:$src1),
                                (ld_frag addr:$src2)))]>,
                 EVEX_4V, Sched<[sched.Folded, sched.ReadAfterFold]>;
+  def : InstAlias<"v"#asm#mem#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+                  (!cast<Instruction>(NAME#"rr_Int") DstVT.RC:$dst,
+                  DstVT.RC:$src1, SrcRC:$src2), 0, "att">;
 }
 
 multiclass avx512_vcvtsi_round<bits<8> opc, SDNode OpNode,
                                X86FoldableSchedWrite sched, RegisterClass SrcRC,
-                               X86VectorVTInfo DstVT, string asm> {
+                               X86VectorVTInfo DstVT, string asm,
+                               string mem> {
   def rrb_Int : SI<opc, MRMSrcReg, (outs DstVT.RC:$dst),
               (ins DstVT.RC:$src1, SrcRC:$src2, AVX512RC:$rc),
               !strconcat(asm,
@@ -7231,32 +7236,36 @@ multiclass avx512_vcvtsi_round<bits<8> o
                              SrcRC:$src2,
                              (i32 timm:$rc)))]>,
               EVEX_4V, EVEX_B, EVEX_RC, Sched<[sched, ReadDefault, ReadInt2Fpu]>;
+  def : InstAlias<"v"#asm#mem#"\t{$src2, $rc, $src1, $dst|$dst, $src1, $rc, $src2}",
+                  (!cast<Instruction>(NAME#"rrb_Int") DstVT.RC:$dst,
+                  DstVT.RC:$src1, SrcRC:$src2, AVX512RC:$rc), 0, "att">;
 }
 
 multiclass avx512_vcvtsi_common<bits<8> opc, SDNode OpNode, SDNode OpNodeRnd,
                                 X86FoldableSchedWrite sched,
                                 RegisterClass SrcRC, X86VectorVTInfo DstVT,
-                                X86MemOperand x86memop, PatFrag ld_frag, string asm> {
-  defm NAME : avx512_vcvtsi_round<opc, OpNodeRnd, sched, SrcRC, DstVT, asm>,
+                                X86MemOperand x86memop, PatFrag ld_frag,
+                                string asm, string mem> {
+  defm NAME : avx512_vcvtsi_round<opc, OpNodeRnd, sched, SrcRC, DstVT, asm, mem>,
               avx512_vcvtsi<opc, OpNode, sched, SrcRC, DstVT, x86memop,
-                            ld_frag, asm>, VEX_LIG;
+                            ld_frag, asm, mem>, VEX_LIG;
 }
 
 let Predicates = [HasAVX512] in {
 defm VCVTSI2SSZ  : avx512_vcvtsi_common<0x2A, X86SintToFp, X86SintToFpRnd,
                                  WriteCvtI2SS, GR32,
-                                 v4f32x_info, i32mem, loadi32, "cvtsi2ss{l}">,
+                                 v4f32x_info, i32mem, loadi32, "cvtsi2ss", "l">,
                                  XS, EVEX_CD8<32, CD8VT1>;
 defm VCVTSI642SSZ: avx512_vcvtsi_common<0x2A, X86SintToFp, X86SintToFpRnd,
                                  WriteCvtI2SS, GR64,
-                                 v4f32x_info, i64mem, loadi64, "cvtsi2ss{q}">,
+                                 v4f32x_info, i64mem, loadi64, "cvtsi2ss", "q">,
                                  XS, VEX_W, EVEX_CD8<64, CD8VT1>;
 defm VCVTSI2SDZ  : avx512_vcvtsi<0x2A, null_frag, WriteCvtI2SD, GR32,
-                                 v2f64x_info, i32mem, loadi32, "cvtsi2sd{l}">,
+                                 v2f64x_info, i32mem, loadi32, "cvtsi2sd", "l">,
                                  XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
 defm VCVTSI642SDZ: avx512_vcvtsi_common<0x2A, X86SintToFp, X86SintToFpRnd,
                                  WriteCvtI2SD, GR64,
-                                 v2f64x_info, i64mem, loadi64, "cvtsi2sd{q}">,
+                                 v2f64x_info, i64mem, loadi64, "cvtsi2sd", "q">,
                                  XD, VEX_W, EVEX_CD8<64, CD8VT1>;
 
 def : InstAlias<"vcvtsi2ss\t{$src, $src1, $dst|$dst, $src1, $src}",
@@ -7285,17 +7294,17 @@ def : Pat<(f64 (sint_to_fp GR64:$src)),
 defm VCVTUSI2SSZ   : avx512_vcvtsi_common<0x7B, X86UintToFp, X86UintToFpRnd,
                                   WriteCvtI2SS, GR32,
                                   v4f32x_info, i32mem, loadi32,
-                                  "cvtusi2ss{l}">, XS, EVEX_CD8<32, CD8VT1>;
+                                  "cvtusi2ss", "l">, XS, EVEX_CD8<32, CD8VT1>;
 defm VCVTUSI642SSZ : avx512_vcvtsi_common<0x7B, X86UintToFp, X86UintToFpRnd,
                                   WriteCvtI2SS, GR64,
-                                  v4f32x_info, i64mem, loadi64, "cvtusi2ss{q}">,
+                                  v4f32x_info, i64mem, loadi64, "cvtusi2ss", "q">,
                                   XS, VEX_W, EVEX_CD8<64, CD8VT1>;
 defm VCVTUSI2SDZ   : avx512_vcvtsi<0x7B, null_frag, WriteCvtI2SD, GR32, v2f64x_info,
-                                  i32mem, loadi32, "cvtusi2sd{l}">,
+                                  i32mem, loadi32, "cvtusi2sd", "l">,
                                   XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
 defm VCVTUSI642SDZ : avx512_vcvtsi_common<0x7B, X86UintToFp, X86UintToFpRnd,
                                   WriteCvtI2SD, GR64,
-                                  v2f64x_info, i64mem, loadi64, "cvtusi2sd{q}">,
+                                  v2f64x_info, i64mem, loadi64, "cvtusi2sd", "q">,
                                   XD, VEX_W, EVEX_CD8<64, CD8VT1>;
 
 def : InstAlias<"vcvtusi2ss\t{$src, $src1, $dst|$dst, $src1, $src}",

Modified: llvm/trunk/lib/Target/X86/X86InstrSSE.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrSSE.td?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrSSE.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrSSE.td Mon May  6 14:39:51 2019
@@ -822,14 +822,16 @@ let Constraints = "$src1 = $dst" in {
 
 multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                      SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
-                     string asm, X86FoldableSchedWrite sched,
+                     string asm, string mem, X86FoldableSchedWrite sched,
                      SchedRead Int2Fpu = ReadDefault> {
-  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
-                        [(set DstRC:$dst, (OpNode SrcRC:$src))]>,
-                        Sched<[sched, Int2Fpu]>;
-  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
-                        [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>,
-                        Sched<[sched.Folded]>;
+  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
+              !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
+              [(set DstRC:$dst, (OpNode SrcRC:$src))]>,
+              Sched<[sched, Int2Fpu]>;
+  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
+              mem#"\t{$src, $dst|$dst, $src}",
+              [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>,
+              Sched<[sched.Folded]>;
 }
 
 multiclass sse12_cvt_p<bits<8> opc, RegisterClass RC, X86MemOperand x86memop,
@@ -848,7 +850,7 @@ let hasSideEffects = 0 in {
 }
 
 multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
-                          X86MemOperand x86memop, string asm,
+                          X86MemOperand x86memop, string asm, string mem,
                           X86FoldableSchedWrite sched> {
 let hasSideEffects = 0, Predicates = [UseAVX] in {
   def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
@@ -857,26 +859,26 @@ let hasSideEffects = 0, Predicates = [Us
   let mayLoad = 1 in
   def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
               (ins DstRC:$src1, x86memop:$src),
-              !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
+              asm#"{"#mem#"}\t{$src, $src1, $dst|$dst, $src1, $src}", []>,
            Sched<[sched.Folded, sched.ReadAfterFold]>;
 } // hasSideEffects = 0
 }
 
 let isCodeGenOnly = 1, Predicates = [UseAVX] in {
 defm VCVTTSS2SI   : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
-                                "cvttss2si\t{$src, $dst|$dst, $src}",
+                                "cvttss2si", "cvttss2si",
                                 WriteCvtSS2I>,
                                 XS, VEX, VEX_LIG;
 defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
-                                "cvttss2si\t{$src, $dst|$dst, $src}",
+                                "cvttss2si", "cvttss2si",
                                 WriteCvtSS2I>,
                                 XS, VEX, VEX_W, VEX_LIG;
 defm VCVTTSD2SI   : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
-                                "cvttsd2si\t{$src, $dst|$dst, $src}",
+                                "cvttsd2si", "cvttsd2si",
                                 WriteCvtSD2I>,
                                 XD, VEX, VEX_LIG;
 defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
-                                "cvttsd2si\t{$src, $dst|$dst, $src}",
+                                "cvttsd2si", "cvttsd2si",
                                 WriteCvtSD2I>,
                                 XD, VEX, VEX_W, VEX_LIG;
 }
@@ -886,13 +888,13 @@ defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR
 // provide other assembly "l" and "q" forms to address this explicitly
 // where appropriate to do so.
 let isCodeGenOnly = 1 in {
-defm VCVTSI2SS   : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss{l}",
+defm VCVTSI2SS   : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss", "l",
                                   WriteCvtI2SS>, XS, VEX_4V, VEX_LIG;
-defm VCVTSI642SS : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}",
+defm VCVTSI642SS : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss", "q",
                                   WriteCvtI2SS>, XS, VEX_4V, VEX_W, VEX_LIG;
-defm VCVTSI2SD   : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}",
+defm VCVTSI2SD   : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd", "l",
                                   WriteCvtI2SD>, XD, VEX_4V, VEX_LIG;
-defm VCVTSI642SD : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}",
+defm VCVTSI642SD : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd", "q",
                                   WriteCvtI2SD>, XD, VEX_4V, VEX_W, VEX_LIG;
 } // isCodeGenOnly = 1
 
@@ -918,28 +920,28 @@ let Predicates = [UseAVX] in {
 
 let isCodeGenOnly = 1 in {
 defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
-                      "cvttss2si\t{$src, $dst|$dst, $src}",
+                      "cvttss2si", "cvttss2si",
                       WriteCvtSS2I>, XS;
 defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
-                      "cvttss2si\t{$src, $dst|$dst, $src}",
+                      "cvttss2si", "cvttss2si",
                       WriteCvtSS2I>, XS, REX_W;
 defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
-                      "cvttsd2si\t{$src, $dst|$dst, $src}",
+                      "cvttsd2si", "cvttsd2si",
                       WriteCvtSD2I>, XD;
 defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
-                      "cvttsd2si\t{$src, $dst|$dst, $src}",
+                      "cvttsd2si", "cvttsd2si",
                       WriteCvtSD2I>, XD, REX_W;
 defm CVTSI2SS  : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
-                      "cvtsi2ss{l}\t{$src, $dst|$dst, $src}",
+                      "cvtsi2ss", "cvtsi2ss{l}",
                       WriteCvtI2SS, ReadInt2Fpu>, XS;
 defm CVTSI642SS : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
-                      "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
+                      "cvtsi2ss", "cvtsi2ss{q}",
                       WriteCvtI2SS, ReadInt2Fpu>, XS, REX_W;
 defm CVTSI2SD  : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
-                      "cvtsi2sd{l}\t{$src, $dst|$dst, $src}",
+                      "cvtsi2sd", "cvtsi2sd{l}",
                       WriteCvtI2SD, ReadInt2Fpu>, XD;
 defm CVTSI642SD : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
-                      "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
+                      "cvtsi2sd", "cvtsi2sd{q}",
                       WriteCvtI2SD, ReadInt2Fpu>, XD, REX_W;
 } // isCodeGenOnly = 1
 
@@ -962,7 +964,7 @@ multiclass sse12_cvt_sint<bits<8> opc, R
 
 multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
                     RegisterClass DstRC, X86MemOperand x86memop,
-                    string asm, X86FoldableSchedWrite sched,
+                    string asm, string mem, X86FoldableSchedWrite sched,
                     bit Is2Addr = 1> {
 let hasSideEffects = 0 in {
   def rr_Int : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
@@ -974,8 +976,8 @@ let hasSideEffects = 0 in {
   def rm_Int : SI<opc, MRMSrcMem, (outs DstRC:$dst),
                   (ins DstRC:$src1, x86memop:$src2),
                   !if(Is2Addr,
-                      !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
-                      !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+                      asm#"{"#mem#"}\t{$src2, $dst|$dst, $src2}",
+                      asm#"{"#mem#"}\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   []>, Sched<[sched.Folded, sched.ReadAfterFold]>;
 }
 }
@@ -996,30 +998,48 @@ defm CVTSD2SI64 : sse12_cvt_sint<0x2D, V
 
 let Predicates = [UseAVX] in {
 defm VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
-          i32mem, "cvtsi2ss{l}", WriteCvtI2SS, 0>, XS, VEX_4V, VEX_LIG;
+          i32mem, "cvtsi2ss", "l", WriteCvtI2SS, 0>, XS, VEX_4V, VEX_LIG;
 defm VCVTSI642SS : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
-          i64mem, "cvtsi2ss{q}", WriteCvtI2SS, 0>, XS, VEX_4V, VEX_LIG, VEX_W;
+          i64mem, "cvtsi2ss", "q", WriteCvtI2SS, 0>, XS, VEX_4V, VEX_LIG, VEX_W;
 defm VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
-          i32mem, "cvtsi2sd{l}", WriteCvtI2SD, 0>, XD, VEX_4V, VEX_LIG;
+          i32mem, "cvtsi2sd", "l", WriteCvtI2SD, 0>, XD, VEX_4V, VEX_LIG;
 defm VCVTSI642SD : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
-          i64mem, "cvtsi2sd{q}", WriteCvtI2SD, 0>, XD, VEX_4V, VEX_LIG, VEX_W;
+          i64mem, "cvtsi2sd", "q", WriteCvtI2SD, 0>, XD, VEX_4V, VEX_LIG, VEX_W;
 }
 let Constraints = "$src1 = $dst" in {
   defm CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
-                        i32mem, "cvtsi2ss{l}", WriteCvtI2SS>, XS;
+                        i32mem, "cvtsi2ss", "l", WriteCvtI2SS>, XS;
   defm CVTSI642SS : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
-                        i64mem, "cvtsi2ss{q}", WriteCvtI2SS>, XS, REX_W;
+                        i64mem, "cvtsi2ss", "q", WriteCvtI2SS>, XS, REX_W;
   defm CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
-                        i32mem, "cvtsi2sd{l}", WriteCvtI2SD>, XD;
+                        i32mem, "cvtsi2sd", "l", WriteCvtI2SD>, XD;
   defm CVTSI642SD : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
-                        i64mem, "cvtsi2sd{q}", WriteCvtI2SD>, XD, REX_W;
+                        i64mem, "cvtsi2sd", "q", WriteCvtI2SD>, XD, REX_W;
 }
 
+def : InstAlias<"vcvtsi2ss{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+               (VCVTSI2SSrr_Int VR128:$dst, VR128:$src1, GR32:$src2), 0, "att">;
+def : InstAlias<"vcvtsi2ss{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+               (VCVTSI642SSrr_Int VR128:$dst, VR128:$src1, GR64:$src2), 0, "att">;
+def : InstAlias<"vcvtsi2sd{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+               (VCVTSI2SDrr_Int VR128:$dst, VR128:$src1, GR32:$src2), 0, "att">;
+def : InstAlias<"vcvtsi2sd{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+               (VCVTSI642SDrr_Int VR128:$dst, VR128:$src1, GR64:$src2), 0, "att">;
+
 def : InstAlias<"vcvtsi2ss\t{$src, $src1, $dst|$dst, $src1, $src}",
               (VCVTSI2SSrm_Int VR128:$dst, VR128:$src1, i32mem:$src), 0, "att">;
 def : InstAlias<"vcvtsi2sd\t{$src, $src1, $dst|$dst, $src1, $src}",
               (VCVTSI2SDrm_Int VR128:$dst, VR128:$src1, i32mem:$src), 0, "att">;
 
+def : InstAlias<"cvtsi2ss{l}\t{$src, $dst|$dst, $src}",
+                (CVTSI2SSrr_Int VR128:$dst, GR32:$src), 0, "att">;
+def : InstAlias<"cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
+                (CVTSI642SSrr_Int VR128:$dst, GR64:$src), 0, "att">;
+def : InstAlias<"cvtsi2sd{l}\t{$src, $dst|$dst, $src}",
+                (CVTSI2SDrr_Int VR128:$dst, GR32:$src), 0, "att">;
+def : InstAlias<"cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
+                (CVTSI642SDrr_Int VR128:$dst, GR64:$src), 0, "att">;
+
 def : InstAlias<"cvtsi2ss\t{$src, $dst|$dst, $src}",
                 (CVTSI2SSrm_Int VR128:$dst, i32mem:$src), 0, "att">;
 def : InstAlias<"cvtsi2sd\t{$src, $dst|$dst, $src}",

Modified: llvm/trunk/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll Mon May  6 14:39:51 2019
@@ -39,13 +39,13 @@ define %struct.__vv* @t(%struct.Key* %de
 ; CHECK-NEXT:    ## in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    movl 0, %eax
 ; CHECK-NEXT:    xorps %xmm0, %xmm0
-; CHECK-NEXT:    cvtsi2ssq %rax, %xmm0
+; CHECK-NEXT:    cvtsi2ss %rax, %xmm0
 ; CHECK-NEXT:    movl 4, %eax
 ; CHECK-NEXT:    xorps %xmm1, %xmm1
-; CHECK-NEXT:    cvtsi2ssq %rax, %xmm1
+; CHECK-NEXT:    cvtsi2ss %rax, %xmm1
 ; CHECK-NEXT:    movl 8, %eax
 ; CHECK-NEXT:    xorps %xmm2, %xmm2
-; CHECK-NEXT:    cvtsi2ssq %rax, %xmm2
+; CHECK-NEXT:    cvtsi2ss %rax, %xmm2
 ; CHECK-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
 ; CHECK-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
 ; CHECK-NEXT:    movaps %xmm0, 0

Modified: llvm/trunk/test/CodeGen/X86/avx512-cvt-widen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-cvt-widen.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-cvt-widen.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-cvt-widen.ll Mon May  6 14:39:51 2019
@@ -22,27 +22,27 @@ define <8 x double> @sltof864(<8 x i64>
 ; NODQ:       # %bb.0:
 ; NODQ-NEXT:    vextracti32x4 $3, %zmm0, %xmm1
 ; NODQ-NEXT:    vpextrq $1, %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm2
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm2
 ; NODQ-NEXT:    vmovq %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm1
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm1
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; NODQ-NEXT:    vextracti32x4 $2, %zmm0, %xmm2
 ; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm3
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm3
 ; NODQ-NEXT:    vmovq %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm2
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm2
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; NODQ-NEXT:    vextracti128 $1, %ymm0, %xmm2
 ; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm3
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm3
 ; NODQ-NEXT:    vmovq %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm2
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm2
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm3
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm3
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm0
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm0
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; NODQ-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
@@ -66,14 +66,14 @@ define <4 x double> @slto4f64(<4 x i64>
 ; NODQ:       # %bb.0:
 ; NODQ-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; NODQ-NEXT:    vpextrq $1, %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm2
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm2
 ; NODQ-NEXT:    vmovq %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm1
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm1
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm2
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm2
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm0
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm0
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; NODQ-NEXT:    retq
@@ -97,9 +97,9 @@ define <2 x double> @slto2f64(<2 x i64>
 ; NODQ-LABEL: slto2f64:
 ; NODQ:       # %bb.0:
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm1, %xmm1
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm1, %xmm1
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm0
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm0
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; NODQ-NEXT:    retq
 ;
@@ -123,11 +123,11 @@ define <2 x float> @sltof2f32(<2 x i64>
 ; NODQ-LABEL: sltof2f32:
 ; NODQ:       # %bb.0:
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm1
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm1
 ; NODQ-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; NODQ-NEXT:    retq
 ;
@@ -153,15 +153,15 @@ define <4 x float> @slto4f32_mem(<4 x i6
 ; NODQ-NEXT:    vmovdqu (%rdi), %xmm0
 ; NODQ-NEXT:    vmovdqu 16(%rdi), %xmm1
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
 ; NODQ-NEXT:    vmovq %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm1
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm1
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; NODQ-NEXT:    retq
 ;
@@ -254,16 +254,16 @@ define <4 x float> @slto4f32(<4 x i64> %
 ; NODQ-LABEL: slto4f32:
 ; NODQ:       # %bb.0:
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
 ; NODQ-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; NODQ-NEXT:    vzeroupper
 ; NODQ-NEXT:    retq
@@ -289,16 +289,16 @@ define <4 x float> @ulto4f32(<4 x i64> %
 ; NODQ-LABEL: ulto4f32:
 ; NODQ:       # %bb.0:
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm1, %xmm1
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm1, %xmm1
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm2
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm2
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
 ; NODQ-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm2
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm2
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm0
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm0
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; NODQ-NEXT:    vzeroupper
 ; NODQ-NEXT:    retq
@@ -916,28 +916,28 @@ define <8 x float> @slto8f32(<8 x i64> %
 ; NODQ:       # %bb.0:
 ; NODQ-NEXT:    vextracti32x4 $2, %zmm0, %xmm1
 ; NODQ-NEXT:    vpextrq $1, %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; NODQ-NEXT:    vmovq %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm1
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm1
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
 ; NODQ-NEXT:    vextracti32x4 $3, %zmm0, %xmm2
 ; NODQ-NEXT:    vmovq %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm3
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm3
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm4, %xmm2
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm4, %xmm2
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm4, %xmm2
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm4, %xmm2
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm4, %xmm3
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm4, %xmm3
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
 ; NODQ-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm4, %xmm3
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm4, %xmm3
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm4, %xmm0
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm4, %xmm0
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; NODQ-NEXT:    retq
@@ -960,54 +960,54 @@ define <16 x float> @slto16f32(<16 x i64
 ; NODQ:       # %bb.0:
 ; NODQ-NEXT:    vextracti32x4 $2, %zmm1, %xmm2
 ; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm3
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm3
 ; NODQ-NEXT:    vmovq %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm4, %xmm2
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm4, %xmm2
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
 ; NODQ-NEXT:    vextracti32x4 $3, %zmm1, %xmm3
 ; NODQ-NEXT:    vmovq %xmm3, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm4, %xmm4
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm4, %xmm4
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm4[0],xmm2[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm3, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
 ; NODQ-NEXT:    vpextrq $1, %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vmovq %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3]
 ; NODQ-NEXT:    vextracti128 $1, %ymm1, %xmm1
 ; NODQ-NEXT:    vmovq %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm1
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm1
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; NODQ-NEXT:    vextracti32x4 $2, %zmm0, %xmm2
 ; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vmovq %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm2
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm2
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
 ; NODQ-NEXT:    vextracti32x4 $3, %zmm0, %xmm3
 ; NODQ-NEXT:    vmovq %xmm3, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm4[0],xmm2[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm3, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3]
 ; NODQ-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm0
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm0
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; NODQ-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
@@ -1035,27 +1035,27 @@ define <8 x double> @slto8f64(<8 x i64>
 ; NODQ:       # %bb.0:
 ; NODQ-NEXT:    vextracti32x4 $3, %zmm0, %xmm1
 ; NODQ-NEXT:    vpextrq $1, %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm2
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm2
 ; NODQ-NEXT:    vmovq %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm1
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm1
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; NODQ-NEXT:    vextracti32x4 $2, %zmm0, %xmm2
 ; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm3
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm3
 ; NODQ-NEXT:    vmovq %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm2
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm2
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; NODQ-NEXT:    vextracti128 $1, %ymm0, %xmm2
 ; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm3
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm3
 ; NODQ-NEXT:    vmovq %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm2
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm2
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm3
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm3
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm0
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm0
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; NODQ-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
@@ -1079,53 +1079,53 @@ define <16 x double> @slto16f64(<16 x i6
 ; NODQ:       # %bb.0:
 ; NODQ-NEXT:    vextracti32x4 $3, %zmm0, %xmm2
 ; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm3
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm3
 ; NODQ-NEXT:    vmovq %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm2
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm2
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
 ; NODQ-NEXT:    vextracti32x4 $2, %zmm0, %xmm3
 ; NODQ-NEXT:    vpextrq $1, %xmm3, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm4
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm4
 ; NODQ-NEXT:    vmovq %xmm3, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm3[0],xmm4[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
 ; NODQ-NEXT:    vextracti128 $1, %ymm0, %xmm3
 ; NODQ-NEXT:    vpextrq $1, %xmm3, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vmovq %xmm3, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm3[0],xmm4[0]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm5, %xmm0
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm5, %xmm0
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm4[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
 ; NODQ-NEXT:    vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
 ; NODQ-NEXT:    vextracti32x4 $3, %zmm1, %xmm2
 ; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vmovq %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm5, %xmm2
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm5, %xmm2
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
 ; NODQ-NEXT:    vextracti32x4 $2, %zmm1, %xmm3
 ; NODQ-NEXT:    vpextrq $1, %xmm3, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vmovq %xmm3, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm3[0],xmm4[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
 ; NODQ-NEXT:    vextracti128 $1, %ymm1, %xmm3
 ; NODQ-NEXT:    vpextrq $1, %xmm3, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vmovq %xmm3, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm3[0],xmm4[0]
 ; NODQ-NEXT:    vpextrq $1, %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vmovq %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm5, %xmm1
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm5, %xmm1
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm4[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
 ; NODQ-NEXT:    vinsertf64x4 $1, %ymm2, %zmm1, %zmm1
@@ -1151,28 +1151,28 @@ define <8 x float> @ulto8f32(<8 x i64> %
 ; NODQ:       # %bb.0:
 ; NODQ-NEXT:    vextracti32x4 $2, %zmm0, %xmm1
 ; NODQ-NEXT:    vpextrq $1, %xmm1, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm2
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm2
 ; NODQ-NEXT:    vmovq %xmm1, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm1
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm1
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
 ; NODQ-NEXT:    vextracti32x4 $3, %zmm0, %xmm2
 ; NODQ-NEXT:    vmovq %xmm2, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm3
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm3
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm4, %xmm2
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm4, %xmm2
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm4, %xmm2
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm4, %xmm2
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm4, %xmm3
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm4, %xmm3
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
 ; NODQ-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm4, %xmm3
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm4, %xmm3
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm4, %xmm0
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm4, %xmm0
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; NODQ-NEXT:    retq
@@ -1195,54 +1195,54 @@ define <16 x float> @ulto16f32(<16 x i64
 ; NODQ:       # %bb.0:
 ; NODQ-NEXT:    vextracti32x4 $2, %zmm1, %xmm2
 ; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm3
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm3
 ; NODQ-NEXT:    vmovq %xmm2, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm4, %xmm2
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm4, %xmm2
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
 ; NODQ-NEXT:    vextracti32x4 $3, %zmm1, %xmm3
 ; NODQ-NEXT:    vmovq %xmm3, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm4, %xmm4
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm4, %xmm4
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm4[0],xmm2[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm3, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
 ; NODQ-NEXT:    vpextrq $1, %xmm1, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vmovq %xmm1, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3]
 ; NODQ-NEXT:    vextracti128 $1, %ymm1, %xmm1
 ; NODQ-NEXT:    vmovq %xmm1, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm1, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm1
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm1
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; NODQ-NEXT:    vextracti32x4 $2, %zmm0, %xmm2
 ; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vmovq %xmm2, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm2
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm2
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
 ; NODQ-NEXT:    vextracti32x4 $3, %zmm0, %xmm3
 ; NODQ-NEXT:    vmovq %xmm3, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm4[0],xmm2[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm3, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3]
 ; NODQ-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm0
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm0
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; NODQ-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
@@ -1424,7 +1424,7 @@ define i32 @fptoui(float %a) nounwind {
 define float @uitof32(i32 %a) nounwind {
 ; ALL-LABEL: uitof32:
 ; ALL:       # %bb.0:
-; ALL-NEXT:    vcvtusi2ssl %edi, %xmm0, %xmm0
+; ALL-NEXT:    vcvtusi2ss %edi, %xmm0, %xmm0
 ; ALL-NEXT:    retq
   %b = uitofp i32 %a to float
   ret float %b
@@ -1433,7 +1433,7 @@ define float @uitof32(i32 %a) nounwind {
 define double @uitof64(i32 %a) nounwind {
 ; ALL-LABEL: uitof64:
 ; ALL:       # %bb.0:
-; ALL-NEXT:    vcvtusi2sdl %edi, %xmm0, %xmm0
+; ALL-NEXT:    vcvtusi2sd %edi, %xmm0, %xmm0
 ; ALL-NEXT:    retq
   %b = uitofp i32 %a to double
   ret double %b

Modified: llvm/trunk/test/CodeGen/X86/avx512-cvt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-cvt.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-cvt.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-cvt.ll Mon May  6 14:39:51 2019
@@ -22,27 +22,27 @@ define <8 x double> @sltof864(<8 x i64>
 ; NODQ:       # %bb.0:
 ; NODQ-NEXT:    vextracti32x4 $3, %zmm0, %xmm1
 ; NODQ-NEXT:    vpextrq $1, %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm2
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm2
 ; NODQ-NEXT:    vmovq %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm1
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm1
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; NODQ-NEXT:    vextracti32x4 $2, %zmm0, %xmm2
 ; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm3
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm3
 ; NODQ-NEXT:    vmovq %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm2
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm2
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; NODQ-NEXT:    vextracti128 $1, %ymm0, %xmm2
 ; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm3
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm3
 ; NODQ-NEXT:    vmovq %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm2
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm2
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm3
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm3
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm0
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm0
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; NODQ-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
@@ -66,14 +66,14 @@ define <4 x double> @slto4f64(<4 x i64>
 ; NODQ:       # %bb.0:
 ; NODQ-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; NODQ-NEXT:    vpextrq $1, %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm2
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm2
 ; NODQ-NEXT:    vmovq %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm1
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm1
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm2
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm2
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm0
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm0
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; NODQ-NEXT:    retq
@@ -97,9 +97,9 @@ define <2 x double> @slto2f64(<2 x i64>
 ; NODQ-LABEL: slto2f64:
 ; NODQ:       # %bb.0:
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm1, %xmm1
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm1, %xmm1
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm0
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm0
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; NODQ-NEXT:    retq
 ;
@@ -123,11 +123,11 @@ define <2 x float> @sltof2f32(<2 x i64>
 ; NODQ-LABEL: sltof2f32:
 ; NODQ:       # %bb.0:
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm1
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm1
 ; NODQ-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; NODQ-NEXT:    retq
 ;
@@ -153,15 +153,15 @@ define <4 x float> @slto4f32_mem(<4 x i6
 ; NODQ-NEXT:    vmovdqu (%rdi), %xmm0
 ; NODQ-NEXT:    vmovdqu 16(%rdi), %xmm1
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
 ; NODQ-NEXT:    vmovq %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm1
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm1
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; NODQ-NEXT:    retq
 ;
@@ -254,16 +254,16 @@ define <4 x float> @slto4f32(<4 x i64> %
 ; NODQ-LABEL: slto4f32:
 ; NODQ:       # %bb.0:
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
 ; NODQ-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; NODQ-NEXT:    vzeroupper
 ; NODQ-NEXT:    retq
@@ -289,16 +289,16 @@ define <4 x float> @ulto4f32(<4 x i64> %
 ; NODQ-LABEL: ulto4f32:
 ; NODQ:       # %bb.0:
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm1, %xmm1
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm1, %xmm1
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm2
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm2
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
 ; NODQ-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm2
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm2
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm0
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm0
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; NODQ-NEXT:    vzeroupper
 ; NODQ-NEXT:    retq
@@ -918,28 +918,28 @@ define <8 x float> @slto8f32(<8 x i64> %
 ; NODQ:       # %bb.0:
 ; NODQ-NEXT:    vextracti32x4 $2, %zmm0, %xmm1
 ; NODQ-NEXT:    vpextrq $1, %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; NODQ-NEXT:    vmovq %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm1
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm1
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
 ; NODQ-NEXT:    vextracti32x4 $3, %zmm0, %xmm2
 ; NODQ-NEXT:    vmovq %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm3
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm3
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm4, %xmm2
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm4, %xmm2
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm4, %xmm2
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm4, %xmm2
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm4, %xmm3
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm4, %xmm3
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
 ; NODQ-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm4, %xmm3
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm4, %xmm3
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm4, %xmm0
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm4, %xmm0
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; NODQ-NEXT:    retq
@@ -962,54 +962,54 @@ define <16 x float> @slto16f32(<16 x i64
 ; NODQ:       # %bb.0:
 ; NODQ-NEXT:    vextracti32x4 $2, %zmm1, %xmm2
 ; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm3
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm3
 ; NODQ-NEXT:    vmovq %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm4, %xmm2
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm4, %xmm2
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
 ; NODQ-NEXT:    vextracti32x4 $3, %zmm1, %xmm3
 ; NODQ-NEXT:    vmovq %xmm3, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm4, %xmm4
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm4, %xmm4
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm4[0],xmm2[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm3, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
 ; NODQ-NEXT:    vpextrq $1, %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vmovq %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3]
 ; NODQ-NEXT:    vextracti128 $1, %ymm1, %xmm1
 ; NODQ-NEXT:    vmovq %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm1
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm1
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; NODQ-NEXT:    vextracti32x4 $2, %zmm0, %xmm2
 ; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vmovq %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm2
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm2
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
 ; NODQ-NEXT:    vextracti32x4 $3, %zmm0, %xmm3
 ; NODQ-NEXT:    vmovq %xmm3, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm4[0],xmm2[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm3, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3]
 ; NODQ-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm0
+; NODQ-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm0
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; NODQ-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
@@ -1037,27 +1037,27 @@ define <8 x double> @slto8f64(<8 x i64>
 ; NODQ:       # %bb.0:
 ; NODQ-NEXT:    vextracti32x4 $3, %zmm0, %xmm1
 ; NODQ-NEXT:    vpextrq $1, %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm2
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm2
 ; NODQ-NEXT:    vmovq %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm1
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm1
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; NODQ-NEXT:    vextracti32x4 $2, %zmm0, %xmm2
 ; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm3
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm3
 ; NODQ-NEXT:    vmovq %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm2
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm2
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; NODQ-NEXT:    vextracti128 $1, %ymm0, %xmm2
 ; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm3
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm3
 ; NODQ-NEXT:    vmovq %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm2
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm2
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm3
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm3
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm0
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm0
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; NODQ-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
@@ -1081,53 +1081,53 @@ define <16 x double> @slto16f64(<16 x i6
 ; NODQ:       # %bb.0:
 ; NODQ-NEXT:    vextracti32x4 $3, %zmm0, %xmm2
 ; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm3
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm3
 ; NODQ-NEXT:    vmovq %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm2
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm2
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
 ; NODQ-NEXT:    vextracti32x4 $2, %zmm0, %xmm3
 ; NODQ-NEXT:    vpextrq $1, %xmm3, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm4
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm4
 ; NODQ-NEXT:    vmovq %xmm3, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm3[0],xmm4[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
 ; NODQ-NEXT:    vextracti128 $1, %ymm0, %xmm3
 ; NODQ-NEXT:    vpextrq $1, %xmm3, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vmovq %xmm3, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm3[0],xmm4[0]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm5, %xmm0
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm5, %xmm0
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm4[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
 ; NODQ-NEXT:    vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
 ; NODQ-NEXT:    vextracti32x4 $3, %zmm1, %xmm2
 ; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vmovq %xmm2, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm5, %xmm2
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm5, %xmm2
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
 ; NODQ-NEXT:    vextracti32x4 $2, %zmm1, %xmm3
 ; NODQ-NEXT:    vpextrq $1, %xmm3, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vmovq %xmm3, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm3[0],xmm4[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
 ; NODQ-NEXT:    vextracti128 $1, %ymm1, %xmm3
 ; NODQ-NEXT:    vpextrq $1, %xmm3, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vmovq %xmm3, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm3[0],xmm4[0]
 ; NODQ-NEXT:    vpextrq $1, %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vmovq %xmm1, %rax
-; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm5, %xmm1
+; NODQ-NEXT:    vcvtsi2sd %rax, %xmm5, %xmm1
 ; NODQ-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm4[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
 ; NODQ-NEXT:    vinsertf64x4 $1, %ymm2, %zmm1, %zmm1
@@ -1153,28 +1153,28 @@ define <8 x float> @ulto8f32(<8 x i64> %
 ; NODQ:       # %bb.0:
 ; NODQ-NEXT:    vextracti32x4 $2, %zmm0, %xmm1
 ; NODQ-NEXT:    vpextrq $1, %xmm1, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm2
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm2
 ; NODQ-NEXT:    vmovq %xmm1, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm1
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm1
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
 ; NODQ-NEXT:    vextracti32x4 $3, %zmm0, %xmm2
 ; NODQ-NEXT:    vmovq %xmm2, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm3
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm3
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm4, %xmm2
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm4, %xmm2
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm4, %xmm2
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm4, %xmm2
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm4, %xmm3
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm4, %xmm3
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
 ; NODQ-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm4, %xmm3
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm4, %xmm3
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm4, %xmm0
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm4, %xmm0
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; NODQ-NEXT:    retq
@@ -1197,54 +1197,54 @@ define <16 x float> @ulto16f32(<16 x i64
 ; NODQ:       # %bb.0:
 ; NODQ-NEXT:    vextracti32x4 $2, %zmm1, %xmm2
 ; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm3
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm3
 ; NODQ-NEXT:    vmovq %xmm2, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm4, %xmm2
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm4, %xmm2
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
 ; NODQ-NEXT:    vextracti32x4 $3, %zmm1, %xmm3
 ; NODQ-NEXT:    vmovq %xmm3, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm4, %xmm4
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm4, %xmm4
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm4[0],xmm2[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm3, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
 ; NODQ-NEXT:    vpextrq $1, %xmm1, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vmovq %xmm1, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3]
 ; NODQ-NEXT:    vextracti128 $1, %ymm1, %xmm1
 ; NODQ-NEXT:    vmovq %xmm1, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm1, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm1
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm1
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; NODQ-NEXT:    vextracti32x4 $2, %zmm0, %xmm2
 ; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vmovq %xmm2, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm2
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm2
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
 ; NODQ-NEXT:    vextracti32x4 $3, %zmm0, %xmm3
 ; NODQ-NEXT:    vmovq %xmm3, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm4[0],xmm2[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm3, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm3
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm3
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3]
 ; NODQ-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; NODQ-NEXT:    vmovq %xmm0, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm4
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm4
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
 ; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
-; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm0
+; NODQ-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm0
 ; NODQ-NEXT:    vinsertps {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[0]
 ; NODQ-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; NODQ-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
@@ -1426,7 +1426,7 @@ define i32 @fptoui(float %a) nounwind {
 define float @uitof32(i32 %a) nounwind {
 ; ALL-LABEL: uitof32:
 ; ALL:       # %bb.0:
-; ALL-NEXT:    vcvtusi2ssl %edi, %xmm0, %xmm0
+; ALL-NEXT:    vcvtusi2ss %edi, %xmm0, %xmm0
 ; ALL-NEXT:    retq
   %b = uitofp i32 %a to float
   ret float %b
@@ -1435,7 +1435,7 @@ define float @uitof32(i32 %a) nounwind {
 define double @uitof64(i32 %a) nounwind {
 ; ALL-LABEL: uitof64:
 ; ALL:       # %bb.0:
-; ALL-NEXT:    vcvtusi2sdl %edi, %xmm0, %xmm0
+; ALL-NEXT:    vcvtusi2sd %edi, %xmm0, %xmm0
 ; ALL-NEXT:    retq
   %b = uitofp i32 %a to double
   ret double %b

Modified: llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll Mon May  6 14:39:51 2019
@@ -1826,7 +1826,7 @@ define <2 x double> @test_mm_cvtu32_sd(<
 ;
 ; X64-LABEL: test_mm_cvtu32_sd:
 ; X64:       # %bb.0: # %entry
-; X64-NEXT:    vcvtusi2sdl %edi, %xmm0, %xmm0
+; X64-NEXT:    vcvtusi2sd %edi, %xmm0, %xmm0
 ; X64-NEXT:    retq
 entry:
   %conv.i = uitofp i32 %__B to double
@@ -1847,7 +1847,7 @@ define <2 x double> @test_mm_cvtu64_sd(<
 ;
 ; X64-LABEL: test_mm_cvtu64_sd:
 ; X64:       # %bb.0: # %entry
-; X64-NEXT:    vcvtusi2sdq %rdi, %xmm0, %xmm0
+; X64-NEXT:    vcvtusi2sd %rdi, %xmm0, %xmm0
 ; X64-NEXT:    retq
 entry:
   %conv.i = uitofp i64 %__B to double
@@ -1863,7 +1863,7 @@ define <4 x float> @test_mm_cvtu32_ss(<4
 ;
 ; X64-LABEL: test_mm_cvtu32_ss:
 ; X64:       # %bb.0: # %entry
-; X64-NEXT:    vcvtusi2ssl %edi, %xmm0, %xmm0
+; X64-NEXT:    vcvtusi2ss %edi, %xmm0, %xmm0
 ; X64-NEXT:    retq
 entry:
   %conv.i = uitofp i32 %__B to float
@@ -1900,7 +1900,7 @@ define <4 x float> @test_mm_cvtu64_ss(<4
 ;
 ; X64-LABEL: test_mm_cvtu64_ss:
 ; X64:       # %bb.0: # %entry
-; X64-NEXT:    vcvtusi2ssq %rdi, %xmm0, %xmm0
+; X64-NEXT:    vcvtusi2ss %rdi, %xmm0, %xmm0
 ; X64-NEXT:    retq
 entry:
   %conv.i = uitofp i64 %__B to float

Modified: llvm/trunk/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-intrinsics-upgrade.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-intrinsics-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-intrinsics-upgrade.ll Mon May  6 14:39:51 2019
@@ -6669,7 +6669,7 @@ define <2 x double> @test_x86_avx512_mm_
 ;
 ; X64-LABEL: test_x86_avx512_mm_cvtu32_sd:
 ; X64:       ## %bb.0:
-; X64-NEXT:    vcvtusi2sdl %edi, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7f,0x08,0x7b,0xc7]
+; X64-NEXT:    vcvtusi2sd %edi, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7f,0x08,0x7b,0xc7]
 ; X64-NEXT:    retq ## encoding: [0xc3]
 {
   %res = call <2 x double> @llvm.x86.avx512.cvtusi2sd(<2 x double> %a, i32 %b) ; <<<2 x double>> [#uses=1]

Modified: llvm/trunk/test/CodeGen/X86/avx512-intrinsics-x86_64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-intrinsics-x86_64.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-intrinsics-x86_64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-intrinsics-x86_64.ll Mon May  6 14:39:51 2019
@@ -15,7 +15,7 @@ declare i64 @llvm.x86.sse2.cvtsd2si64(<2
 define <2 x double> @test_x86_sse2_cvtsi642sd(<2 x double> %a0, i64 %a1) {
 ; CHECK-LABEL: test_x86_sse2_cvtsi642sd:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0
+; CHECK-NEXT:    vcvtsi2sd %rdi, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double> %a0, i64 %a1) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
@@ -64,7 +64,7 @@ declare i64 @llvm.x86.sse.cvtss2si64(<4
 define <4 x float> @test_x86_sse_cvtsi642ss(<4 x float> %a0, i64 %a1) {
 ; CHECK-LABEL: test_x86_sse_cvtsi642ss:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0
+; CHECK-NEXT:    vcvtsi2ss %rdi, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float> %a0, i64 %a1) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
@@ -193,7 +193,7 @@ declare i64 @llvm.x86.avx512.vcvtss2si64
 define <2 x double> @test_x86_avx512_cvtsi2sd64(<2 x double> %a, i64 %b) {
 ; CHECK-LABEL: test_x86_avx512_cvtsi2sd64:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtsi2sdq %rdi, {rz-sae}, %xmm0, %xmm0
+; CHECK-NEXT:    vcvtsi2sd %rdi, {rz-sae}, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <2 x double> @llvm.x86.avx512.cvtsi2sd64(<2 x double> %a, i64 %b, i32 11) ; <<<2 x double>> [#uses=1]
   ret <2 x double> %res
@@ -203,7 +203,7 @@ declare <2 x double> @llvm.x86.avx512.cv
 define <4 x float> @test_x86_avx512_cvtsi2ss64(<4 x float> %a, i64 %b) {
 ; CHECK-LABEL: test_x86_avx512_cvtsi2ss64:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtsi2ssq %rdi, {rz-sae}, %xmm0, %xmm0
+; CHECK-NEXT:    vcvtsi2ss %rdi, {rz-sae}, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512.cvtsi2ss64(<4 x float> %a, i64 %b, i32 11) ; <<<4 x float>> [#uses=1]
   ret <4 x float> %res
@@ -213,7 +213,7 @@ declare <4 x float> @llvm.x86.avx512.cvt
 define <4 x float> @_mm_cvt_roundu64_ss (<4 x float> %a, i64 %b) {
 ; CHECK-LABEL: _mm_cvt_roundu64_ss:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtusi2ssq %rdi, {rd-sae}, %xmm0, %xmm0
+; CHECK-NEXT:    vcvtusi2ss %rdi, {rd-sae}, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512.cvtusi642ss(<4 x float> %a, i64 %b, i32 9) ; <<<4 x float>> [#uses=1]
   ret <4 x float> %res
@@ -222,7 +222,7 @@ define <4 x float> @_mm_cvt_roundu64_ss
 define <4 x float> @_mm_cvtu64_ss(<4 x float> %a, i64 %b) {
 ; CHECK-LABEL: _mm_cvtu64_ss:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtusi2ssq %rdi, %xmm0, %xmm0
+; CHECK-NEXT:    vcvtusi2ss %rdi, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512.cvtusi642ss(<4 x float> %a, i64 %b, i32 4) ; <<<4 x float>> [#uses=1]
   ret <4 x float> %res
@@ -232,7 +232,7 @@ declare <4 x float> @llvm.x86.avx512.cvt
 define <2 x double> @test_x86_avx512_mm_cvtu64_sd(<2 x double> %a, i64 %b) {
 ; CHECK-LABEL: test_x86_avx512_mm_cvtu64_sd:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtusi2sdq %rdi, {rd-sae}, %xmm0, %xmm0
+; CHECK-NEXT:    vcvtusi2sd %rdi, {rd-sae}, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <2 x double> @llvm.x86.avx512.cvtusi642sd(<2 x double> %a, i64 %b, i32 9) ; <<<2 x double>> [#uses=1]
   ret <2 x double> %res
@@ -241,7 +241,7 @@ define <2 x double> @test_x86_avx512_mm_
 define <2 x double> @test_x86_avx512__mm_cvt_roundu64_sd(<2 x double> %a, i64 %b) {
 ; CHECK-LABEL: test_x86_avx512__mm_cvt_roundu64_sd:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtusi2sdq %rdi, %xmm0, %xmm0
+; CHECK-NEXT:    vcvtusi2sd %rdi, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <2 x double> @llvm.x86.avx512.cvtusi642sd(<2 x double> %a, i64 %b, i32 4) ; <<<2 x double>> [#uses=1]
   ret <2 x double> %res

Modified: llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll Mon May  6 14:39:51 2019
@@ -2913,13 +2913,13 @@ define <2 x double> @test_maskz_max_sd_m
 define <4 x float> @test_x86_avx512_cvtsi2ss32(<4 x float> %a, i32 %b) {
 ; X64-LABEL: test_x86_avx512_cvtsi2ss32:
 ; X64:       # %bb.0:
-; X64-NEXT:    vcvtsi2ssl %edi, {rz-sae}, %xmm0, %xmm0
+; X64-NEXT:    vcvtsi2ss %edi, {rz-sae}, %xmm0, %xmm0
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: test_x86_avx512_cvtsi2ss32:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    vcvtsi2ssl %eax, {rz-sae}, %xmm0, %xmm0
+; X86-NEXT:    vcvtsi2ss %eax, {rz-sae}, %xmm0, %xmm0
 ; X86-NEXT:    retl
   %res = call <4 x float> @llvm.x86.avx512.cvtsi2ss32(<4 x float> %a, i32 %b, i32 11) ; <<<4 x float>> [#uses=1]
   ret <4 x float> %res
@@ -2929,13 +2929,13 @@ declare <4 x float> @llvm.x86.avx512.cvt
 define <4 x float> @test_x86_avx512__mm_cvt_roundu32_ss (<4 x float> %a, i32 %b) {
 ; X64-LABEL: test_x86_avx512__mm_cvt_roundu32_ss:
 ; X64:       # %bb.0:
-; X64-NEXT:    vcvtusi2ssl %edi, {rd-sae}, %xmm0, %xmm0
+; X64-NEXT:    vcvtusi2ss %edi, {rd-sae}, %xmm0, %xmm0
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: test_x86_avx512__mm_cvt_roundu32_ss:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    vcvtusi2ssl %eax, {rd-sae}, %xmm0, %xmm0
+; X86-NEXT:    vcvtusi2ss %eax, {rd-sae}, %xmm0, %xmm0
 ; X86-NEXT:    retl
   %res = call <4 x float> @llvm.x86.avx512.cvtusi2ss(<4 x float> %a, i32 %b, i32 9) ; <<<4 x float>> [#uses=1]
   ret <4 x float> %res
@@ -2945,14 +2945,14 @@ define <4 x float> @test_x86_avx512__mm_
 ; X64-LABEL: test_x86_avx512__mm_cvt_roundu32_ss_mem:
 ; X64:       # %bb.0:
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    vcvtusi2ssl %eax, {rd-sae}, %xmm0, %xmm0
+; X64-NEXT:    vcvtusi2ss %eax, {rd-sae}, %xmm0, %xmm0
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: test_x86_avx512__mm_cvt_roundu32_ss_mem:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl (%eax), %eax
-; X86-NEXT:    vcvtusi2ssl %eax, {rd-sae}, %xmm0, %xmm0
+; X86-NEXT:    vcvtusi2ss %eax, {rd-sae}, %xmm0, %xmm0
 ; X86-NEXT:    retl
   %b = load i32, i32* %ptr
   %res = call <4 x float> @llvm.x86.avx512.cvtusi2ss(<4 x float> %a, i32 %b, i32 9) ; <<<4 x float>> [#uses=1]
@@ -2962,7 +2962,7 @@ define <4 x float> @test_x86_avx512__mm_
 define <4 x float> @test_x86_avx512__mm_cvtu32_ss(<4 x float> %a, i32 %b) {
 ; X64-LABEL: test_x86_avx512__mm_cvtu32_ss:
 ; X64:       # %bb.0:
-; X64-NEXT:    vcvtusi2ssl %edi, %xmm0, %xmm0
+; X64-NEXT:    vcvtusi2ss %edi, %xmm0, %xmm0
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: test_x86_avx512__mm_cvtu32_ss:

Modified: llvm/trunk/test/CodeGen/X86/avx512-regcall-NoMask.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-regcall-NoMask.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-regcall-NoMask.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-regcall-NoMask.ll Mon May  6 14:39:51 2019
@@ -1215,15 +1215,15 @@ define x86_regcallcc i32 @test_argRetMix
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ebx
 ; X32-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
 ; X32-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
-; X32-NEXT:    vcvtsi2sdl %eax, %xmm2, %xmm1
+; X32-NEXT:    vcvtsi2sd %eax, %xmm2, %xmm1
 ; X32-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
-; X32-NEXT:    vcvtsi2sdl %ecx, %xmm2, %xmm1
+; X32-NEXT:    vcvtsi2sd %ecx, %xmm2, %xmm1
 ; X32-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
 ; X32-NEXT:    vmovd %edx, %xmm1
 ; X32-NEXT:    vpinsrd $1, %edi, %xmm1, %xmm1
 ; X32-NEXT:    vcvtqq2pd %ymm1, %ymm1
 ; X32-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
-; X32-NEXT:    vcvtsi2sdl %esi, %xmm2, %xmm1
+; X32-NEXT:    vcvtsi2sd %esi, %xmm2, %xmm1
 ; X32-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
 ; X32-NEXT:    vcvtsi2sdl (%ebx), %xmm2, %xmm1
 ; X32-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
@@ -1236,13 +1236,13 @@ define x86_regcallcc i32 @test_argRetMix
 ; WIN64:       # %bb.0:
 ; WIN64-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
 ; WIN64-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
-; WIN64-NEXT:    vcvtsi2sdl %eax, %xmm2, %xmm1
+; WIN64-NEXT:    vcvtsi2sd %eax, %xmm2, %xmm1
 ; WIN64-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
-; WIN64-NEXT:    vcvtsi2sdl %ecx, %xmm2, %xmm1
+; WIN64-NEXT:    vcvtsi2sd %ecx, %xmm2, %xmm1
 ; WIN64-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
-; WIN64-NEXT:    vcvtsi2sdq %rdx, %xmm2, %xmm1
+; WIN64-NEXT:    vcvtsi2sd %rdx, %xmm2, %xmm1
 ; WIN64-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
-; WIN64-NEXT:    vcvtsi2sdl %edi, %xmm2, %xmm1
+; WIN64-NEXT:    vcvtsi2sd %edi, %xmm2, %xmm1
 ; WIN64-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
 ; WIN64-NEXT:    vcvtsi2sdl (%rsi), %xmm2, %xmm1
 ; WIN64-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
@@ -1253,13 +1253,13 @@ define x86_regcallcc i32 @test_argRetMix
 ; LINUXOSX64:       # %bb.0:
 ; LINUXOSX64-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
 ; LINUXOSX64-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
-; LINUXOSX64-NEXT:    vcvtsi2sdl %eax, %xmm2, %xmm1
+; LINUXOSX64-NEXT:    vcvtsi2sd %eax, %xmm2, %xmm1
 ; LINUXOSX64-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
-; LINUXOSX64-NEXT:    vcvtsi2sdl %ecx, %xmm2, %xmm1
+; LINUXOSX64-NEXT:    vcvtsi2sd %ecx, %xmm2, %xmm1
 ; LINUXOSX64-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
-; LINUXOSX64-NEXT:    vcvtsi2sdq %rdx, %xmm2, %xmm1
+; LINUXOSX64-NEXT:    vcvtsi2sd %rdx, %xmm2, %xmm1
 ; LINUXOSX64-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
-; LINUXOSX64-NEXT:    vcvtsi2sdl %edi, %xmm2, %xmm1
+; LINUXOSX64-NEXT:    vcvtsi2sd %edi, %xmm2, %xmm1
 ; LINUXOSX64-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
 ; LINUXOSX64-NEXT:    vcvtsi2sdl (%rsi), %xmm2, %xmm1
 ; LINUXOSX64-NEXT:    vaddsd %xmm1, %xmm0, %xmm0

Modified: llvm/trunk/test/CodeGen/X86/break-false-dep.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/break-false-dep.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/break-false-dep.ll (original)
+++ llvm/trunk/test/CodeGen/X86/break-false-dep.ll Mon May  6 14:39:51 2019
@@ -75,11 +75,11 @@ declare double @llvm.sqrt.f64(double)
 ; instructions, they are still dependent on themselves.
 ; SSE: xorps [[XMM1:%xmm[0-9]+]]
 ; SSE: , [[XMM1]]
-; SSE: cvtsi2ssl %{{.*}}, [[XMM1]]
+; SSE: cvtsi2ss %{{.*}}, [[XMM1]]
 ; SSE: xorps [[XMM2:%xmm[0-9]+]]
 ; SSE: , [[XMM2]]
-; SSE: cvtsi2ssl %{{.*}}, [[XMM2]]
-;
+; SSE: cvtsi2ss %{{.*}}, [[XMM2]]
+
 define float @loopdep1(i32 %m) nounwind uwtable readnone ssp {
 entry:
   %tobool3 = icmp eq i32 %m, 0
@@ -107,18 +107,18 @@ for.end:
 }
 
 ; rdar:15221834 False AVX register dependencies cause 5x slowdown on
-; flops-6. Make sure the unused register read by vcvtsi2sdq is zeroed
+; flops-6. Make sure the unused register read by vcvtsi2sd is zeroed
 ; to avoid cyclic dependence on a write to the same register in a
 ; previous iteration.
 
 ; AVX-LABEL: loopdep2:
 ; AVX-LABEL: %loop
 ; AVX: vxorps %[[REG:xmm.]], %{{xmm.}}, %{{xmm.}}
-; AVX: vcvtsi2sdq %{{r[0-9a-x]+}}, %[[REG]], %{{xmm.}}
+; AVX: vcvtsi2sd %{{r[0-9a-x]+}}, %[[REG]], %{{xmm.}}
 ; SSE-LABEL: loopdep2:
 ; SSE-LABEL: %loop
 ; SSE: xorps %[[REG:xmm.]], %[[REG]]
-; SSE: cvtsi2sdq %{{r[0-9a-x]+}}, %[[REG]]
+; SSE: cvtsi2sd %{{r[0-9a-x]+}}, %[[REG]]
 define i64 @loopdep2(i64* nocapture %x, double* nocapture %y) nounwind {
 entry:
   %vx = load i64, i64* %x
@@ -217,7 +217,7 @@ top:
   ret double %tmp1
 ;AVX-LABEL:@inlineasmdep
 ;AVX: vxorps  [[XMM0:%xmm[0-9]+]], [[XMM0]], [[XMM0]]
-;AVX-NEXT: vcvtsi2sdq {{.*}}, [[XMM0]], {{%xmm[0-9]+}}
+;AVX-NEXT: vcvtsi2sd {{.*}}, [[XMM0]], {{%xmm[0-9]+}}
 }
 
 ; Make sure we are making a smart choice regarding undef registers and
@@ -257,7 +257,7 @@ top:
   ret double %tmp1
 ;AVX-LABEL:@clearence
 ;AVX: vxorps  [[XMM6:%xmm6]], [[XMM6]], [[XMM6]]
-;AVX-NEXT: vcvtsi2sdq {{.*}}, [[XMM6]], {{%xmm[0-9]+}}
+;AVX-NEXT: vcvtsi2sd {{.*}}, [[XMM6]], {{%xmm[0-9]+}}
 }
 
 ; Make sure we are making a smart choice regarding undef registers in order to
@@ -291,7 +291,7 @@ ret:
 ;AVX-LABEL:@loopclearence
 ;Registers 4-7 are not used and therefore one of them should be chosen
 ;AVX-NOT: {{%xmm[4-7]}}
-;AVX: vcvtsi2sdq {{.*}}, [[XMM4_7:%xmm[4-7]]], {{%xmm[0-9]+}}
+;AVX: vcvtsi2sd {{.*}}, [[XMM4_7:%xmm[4-7]]], {{%xmm[0-9]+}}
 ;AVX-NOT: [[XMM4_7]]
 }
 
@@ -335,12 +335,12 @@ loop_end:
   ; the only reasonable choice. The primary thing we care about is that it's
   ; not one of the registers used in the loop (e.g. not the output reg here)
 ;AVX-NOT: %xmm6
-;AVX: vcvtsi2sdq {{.*}}, %xmm6, {{%xmm[0-9]+}}
+;AVX: vcvtsi2sd {{.*}}, %xmm6, {{%xmm[0-9]+}}
 ;AVX-NOT: %xmm6
   %nexti_f = sitofp i64 %nexti to double
   %sub = fsub double %c1, %nexti_f
   %mul = fmul double %sub, %c2
-;AVX: vcvtsi2sdq {{.*}}, %xmm6, {{%xmm[0-9]+}}
+;AVX: vcvtsi2sd {{.*}}, %xmm6, {{%xmm[0-9]+}}
 ;AVX-NOT: %xmm6
   %phi_f = sitofp i64 %phi to double
   %mul2 = fmul double %phi_f, %c3

Modified: llvm/trunk/test/CodeGen/X86/copy-propagation.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/copy-propagation.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/copy-propagation.ll (original)
+++ llvm/trunk/test/CodeGen/X86/copy-propagation.ll Mon May  6 14:39:51 2019
@@ -14,7 +14,7 @@ target triple = "x86_64-pc-win32-elf"
 ; required, which would have hidden the bug.
 ; CHECK: vmovapd	%xmm0, [[TMP:%xmm[0-9]+]]
 ; CHECK-NOT: vxorps  %xmm0, %xmm0, %xmm0
-; CHECK-NEXT: vcvtsi2sdq      %rsi, %xmm0, %xmm6
+; CHECK-NEXT: vcvtsi2sd      %rsi, %xmm0, %xmm6
 ; CHECK: movl	$339772768, %e[[INDIRECT_CALL2:[a-z]+]]
 ; CHECK-NOT: vmovapd %xmm7, %xmm0
 ; CHECK-NEXT: vmovapd %xmm6, %xmm1

Modified: llvm/trunk/test/CodeGen/X86/cvtv2f32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/cvtv2f32.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/cvtv2f32.ll (original)
+++ llvm/trunk/test/CodeGen/X86/cvtv2f32.ll Mon May  6 14:39:51 2019
@@ -25,9 +25,9 @@ define <2 x float> @uitofp_2i32_cvt_buil
 ; X64-LABEL: uitofp_2i32_cvt_buildvector:
 ; X64:       # %bb.0:
 ; X64-NEXT:    movl %edi, %eax
-; X64-NEXT:    cvtsi2ssq %rax, %xmm1
+; X64-NEXT:    cvtsi2ss %rax, %xmm1
 ; X64-NEXT:    movl %esi, %eax
-; X64-NEXT:    cvtsi2ssq %rax, %xmm2
+; X64-NEXT:    cvtsi2ss %rax, %xmm2
 ; X64-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
 ; X64-NEXT:    mulps %xmm1, %xmm0
 ; X64-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/fast-isel-int-float-conversion-x86-64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-int-float-conversion-x86-64.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-int-float-conversion-x86-64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-int-float-conversion-x86-64.ll Mon May  6 14:39:51 2019
@@ -7,12 +7,12 @@
 define double @long_to_double_rr(i64 %a) {
 ; SSE2-LABEL: long_to_double_rr:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    cvtsi2sdq %rdi, %xmm0
+; SSE2-NEXT:    cvtsi2sd %rdi, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: long_to_double_rr:
 ; AVX:       # %bb.0: # %entry
-; AVX-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0
+; AVX-NEXT:    vcvtsi2sd %rdi, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
   %0 = sitofp i64 %a to double
@@ -54,12 +54,12 @@ entry:
 define float @long_to_float_rr(i64 %a) {
 ; SSE2-LABEL: long_to_float_rr:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    cvtsi2ssq %rdi, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rdi, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: long_to_float_rr:
 ; AVX:       # %bb.0: # %entry
-; AVX-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0
+; AVX-NEXT:    vcvtsi2ss %rdi, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 entry:
   %0 = sitofp i64 %a to float

Modified: llvm/trunk/test/CodeGen/X86/fast-isel-int-float-conversion.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-int-float-conversion.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-int-float-conversion.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-int-float-conversion.ll Mon May  6 14:39:51 2019
@@ -10,12 +10,12 @@
 define double @int_to_double_rr(i32 %a) {
 ; SSE2-LABEL: int_to_double_rr:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    cvtsi2sdl %edi, %xmm0
+; SSE2-NEXT:    cvtsi2sd %edi, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: int_to_double_rr:
 ; AVX:       # %bb.0: # %entry
-; AVX-NEXT:    vcvtsi2sdl %edi, %xmm0, %xmm0
+; AVX-NEXT:    vcvtsi2sd %edi, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; SSE2_X86-LABEL: int_to_double_rr:
@@ -163,12 +163,12 @@ entry:
 define float @int_to_float_rr(i32 %a) {
 ; SSE2-LABEL: int_to_float_rr:
 ; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    cvtsi2ssl %edi, %xmm0
+; SSE2-NEXT:    cvtsi2ss %edi, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: int_to_float_rr:
 ; AVX:       # %bb.0: # %entry
-; AVX-NEXT:    vcvtsi2ssl %edi, %xmm0, %xmm0
+; AVX-NEXT:    vcvtsi2ss %edi, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; SSE2_X86-LABEL: int_to_float_rr:

Modified: llvm/trunk/test/CodeGen/X86/fast-isel-uint-float-conversion-x86-64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-uint-float-conversion-x86-64.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-uint-float-conversion-x86-64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-uint-float-conversion-x86-64.ll Mon May  6 14:39:51 2019
@@ -5,7 +5,7 @@
 define double @long_to_double_rr(i64 %a) {
 ; ALL-LABEL: long_to_double_rr:
 ; ALL:       # %bb.0: # %entry
-; ALL-NEXT:    vcvtusi2sdq %rdi, %xmm0, %xmm0
+; ALL-NEXT:    vcvtusi2sd %rdi, %xmm0, %xmm0
 ; ALL-NEXT:    retq
 entry:
   %0 = uitofp i64 %a to double
@@ -37,7 +37,7 @@ entry:
 define float @long_to_float_rr(i64 %a) {
 ; ALL-LABEL: long_to_float_rr:
 ; ALL:       # %bb.0: # %entry
-; ALL-NEXT:    vcvtusi2ssq %rdi, %xmm0, %xmm0
+; ALL-NEXT:    vcvtusi2ss %rdi, %xmm0, %xmm0
 ; ALL-NEXT:    retq
 entry:
   %0 = uitofp i64 %a to float

Modified: llvm/trunk/test/CodeGen/X86/fast-isel-uint-float-conversion.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-uint-float-conversion.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-uint-float-conversion.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-uint-float-conversion.ll Mon May  6 14:39:51 2019
@@ -6,7 +6,7 @@
 define double @int_to_double_rr(i32 %a) {
 ; AVX-LABEL: int_to_double_rr:
 ; AVX:       # %bb.0: # %entry
-; AVX-NEXT:    vcvtusi2sdl %edi, %xmm0, %xmm0
+; AVX-NEXT:    vcvtusi2sd %edi, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX_X86-LABEL: int_to_double_rr:
@@ -91,7 +91,7 @@ entry:
 define float @int_to_float_rr(i32 %a) {
 ; AVX-LABEL: int_to_float_rr:
 ; AVX:       # %bb.0: # %entry
-; AVX-NEXT:    vcvtusi2ssl %edi, %xmm0, %xmm0
+; AVX-NEXT:    vcvtusi2ss %edi, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX_X86-LABEL: int_to_float_rr:

Modified: llvm/trunk/test/CodeGen/X86/ftrunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/ftrunc.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/ftrunc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/ftrunc.ll Mon May  6 14:39:51 2019
@@ -9,7 +9,7 @@ define float @trunc_unsigned_f32(float %
 ; SSE2-NEXT:    cvttss2si %xmm0, %rax
 ; SSE2-NEXT:    movl %eax, %eax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: trunc_unsigned_f32:
@@ -225,7 +225,7 @@ define float @trunc_signed_f32(float %x)
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    cvttss2si %xmm0, %eax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssl %eax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %eax, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: trunc_signed_f32:
@@ -247,7 +247,7 @@ define double @trunc_signed_f64(double %
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    cvttsd2si %xmm0, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: trunc_signed_f64:
@@ -292,8 +292,8 @@ define <2 x double> @trunc_signed_v2f64(
 ; SSE2-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
 ; SSE2-NEXT:    cvttsd2si %xmm0, %rcx
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm0
-; SSE2-NEXT:    cvtsi2sdq %rcx, %xmm1
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm0
+; SSE2-NEXT:    cvtsi2sd %rcx, %xmm1
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE2-NEXT:    retq
 ;
@@ -321,13 +321,13 @@ define <4 x double> @trunc_signed_v4f64(
 ; SSE2-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
 ; SSE2-NEXT:    cvttsd2si %xmm0, %rsi
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2sdq %rdx, %xmm0
+; SSE2-NEXT:    cvtsi2sd %rdx, %xmm0
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2sdq %rsi, %xmm1
+; SSE2-NEXT:    cvtsi2sd %rsi, %xmm1
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm1
-; SSE2-NEXT:    cvtsi2sdq %rcx, %xmm2
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm1
+; SSE2-NEXT:    cvtsi2sd %rcx, %xmm2
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; SSE2-NEXT:    retq
 ;
@@ -355,7 +355,7 @@ define float @trunc_unsigned_f32_disable
 ; SSE2-NEXT:    cvttss2si %xmm0, %rax
 ; SSE2-NEXT:    movl %eax, %eax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: trunc_unsigned_f32_disable_via_attr:
@@ -363,14 +363,14 @@ define float @trunc_unsigned_f32_disable
 ; SSE41-NEXT:    cvttss2si %xmm0, %rax
 ; SSE41-NEXT:    movl %eax, %eax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_unsigned_f32_disable_via_attr:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vcvttss2si %xmm0, %rax
 ; AVX1-NEXT:    movl %eax, %eax
-; AVX1-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm0
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm0
 ; AVX1-NEXT:    retq
   %i = fptoui float %x to i32
   %r = uitofp i32 %i to float
@@ -382,20 +382,20 @@ define double @trunc_signed_f64_disable_
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    cvttsd2si %xmm0, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: trunc_signed_f64_disable_via_attr:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    cvttsd2si %xmm0, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_signed_f64_disable_via_attr:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vcvttsd2si %xmm0, %rax
-; AVX1-NEXT:    vcvtsi2sdq %rax, %xmm1, %xmm0
+; AVX1-NEXT:    vcvtsi2sd %rax, %xmm1, %xmm0
 ; AVX1-NEXT:    retq
   %i = fptosi double %x to i64
   %r = sitofp i64 %i to double

Modified: llvm/trunk/test/CodeGen/X86/half.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/half.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/half.ll (original)
+++ llvm/trunk/test/CodeGen/X86/half.ll Mon May  6 14:39:51 2019
@@ -230,7 +230,7 @@ define void @test_sitofp_i64(i64 %a, hal
 ; CHECK-LIBCALL:       # %bb.0:
 ; CHECK-LIBCALL-NEXT:    pushq %rbx
 ; CHECK-LIBCALL-NEXT:    movq %rsi, %rbx
-; CHECK-LIBCALL-NEXT:    cvtsi2ssq %rdi, %xmm0
+; CHECK-LIBCALL-NEXT:    cvtsi2ss %rdi, %xmm0
 ; CHECK-LIBCALL-NEXT:    callq __gnu_f2h_ieee
 ; CHECK-LIBCALL-NEXT:    movw %ax, (%rbx)
 ; CHECK-LIBCALL-NEXT:    popq %rbx
@@ -238,7 +238,7 @@ define void @test_sitofp_i64(i64 %a, hal
 ;
 ; BWON-F16C-LABEL: test_sitofp_i64:
 ; BWON-F16C:       # %bb.0:
-; BWON-F16C-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0
+; BWON-F16C-NEXT:    vcvtsi2ss %rdi, %xmm0, %xmm0
 ; BWON-F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
 ; BWON-F16C-NEXT:    vmovd %xmm0, %eax
 ; BWON-F16C-NEXT:    movw %ax, (%rsi)
@@ -322,14 +322,14 @@ define void @test_uitofp_i64(i64 %a, hal
 ; CHECK-LIBCALL-NEXT:    testq %rdi, %rdi
 ; CHECK-LIBCALL-NEXT:    js .LBB10_1
 ; CHECK-LIBCALL-NEXT:  # %bb.2:
-; CHECK-LIBCALL-NEXT:    cvtsi2ssq %rdi, %xmm0
+; CHECK-LIBCALL-NEXT:    cvtsi2ss %rdi, %xmm0
 ; CHECK-LIBCALL-NEXT:    jmp .LBB10_3
 ; CHECK-LIBCALL-NEXT:  .LBB10_1:
 ; CHECK-LIBCALL-NEXT:    movq %rdi, %rax
 ; CHECK-LIBCALL-NEXT:    shrq %rax
 ; CHECK-LIBCALL-NEXT:    andl $1, %edi
 ; CHECK-LIBCALL-NEXT:    orq %rax, %rdi
-; CHECK-LIBCALL-NEXT:    cvtsi2ssq %rdi, %xmm0
+; CHECK-LIBCALL-NEXT:    cvtsi2ss %rdi, %xmm0
 ; CHECK-LIBCALL-NEXT:    addss %xmm0, %xmm0
 ; CHECK-LIBCALL-NEXT:  .LBB10_3:
 ; CHECK-LIBCALL-NEXT:    callq __gnu_f2h_ieee
@@ -342,14 +342,14 @@ define void @test_uitofp_i64(i64 %a, hal
 ; BWON-F16C-NEXT:    testq %rdi, %rdi
 ; BWON-F16C-NEXT:    js .LBB10_1
 ; BWON-F16C-NEXT:  # %bb.2:
-; BWON-F16C-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0
+; BWON-F16C-NEXT:    vcvtsi2ss %rdi, %xmm0, %xmm0
 ; BWON-F16C-NEXT:    jmp .LBB10_3
 ; BWON-F16C-NEXT:  .LBB10_1:
 ; BWON-F16C-NEXT:    movq %rdi, %rax
 ; BWON-F16C-NEXT:    shrq %rax
 ; BWON-F16C-NEXT:    andl $1, %edi
 ; BWON-F16C-NEXT:    orq %rax, %rdi
-; BWON-F16C-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0
+; BWON-F16C-NEXT:    vcvtsi2ss %rdi, %xmm0, %xmm0
 ; BWON-F16C-NEXT:    vaddss %xmm0, %xmm0, %xmm0
 ; BWON-F16C-NEXT:  .LBB10_3:
 ; BWON-F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
@@ -886,7 +886,7 @@ define float @test_sitofp_fadd_i32(i32 %
 ; CHECK-LIBCALL-NEXT:    movzwl (%rsi), %edi
 ; CHECK-LIBCALL-NEXT:    callq __gnu_h2f_ieee
 ; CHECK-LIBCALL-NEXT:    movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; CHECK-LIBCALL-NEXT:    cvtsi2ssl %ebx, %xmm0
+; CHECK-LIBCALL-NEXT:    cvtsi2ss %ebx, %xmm0
 ; CHECK-LIBCALL-NEXT:    callq __gnu_f2h_ieee
 ; CHECK-LIBCALL-NEXT:    movzwl %ax, %edi
 ; CHECK-LIBCALL-NEXT:    callq __gnu_h2f_ieee
@@ -900,7 +900,7 @@ define float @test_sitofp_fadd_i32(i32 %
 ; BWON-F16C-NEXT:    movswl (%rsi), %eax
 ; BWON-F16C-NEXT:    vmovd %eax, %xmm0
 ; BWON-F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
-; BWON-F16C-NEXT:    vcvtsi2ssl %edi, %xmm1, %xmm1
+; BWON-F16C-NEXT:    vcvtsi2ss %edi, %xmm1, %xmm1
 ; BWON-F16C-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
 ; BWON-F16C-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; BWON-F16C-NEXT:    vaddss %xmm1, %xmm0, %xmm0

Modified: llvm/trunk/test/CodeGen/X86/known-bits-vector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/known-bits-vector.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/known-bits-vector.ll (original)
+++ llvm/trunk/test/CodeGen/X86/known-bits-vector.ll Mon May  6 14:39:51 2019
@@ -35,7 +35,7 @@ define float @knownbits_mask_extract_uit
 ; X64:       # %bb.0:
 ; X64-NEXT:    vmovq %xmm0, %rax
 ; X64-NEXT:    movzwl %ax, %eax
-; X64-NEXT:    vcvtsi2ssl %eax, %xmm1, %xmm0
+; X64-NEXT:    vcvtsi2ss %eax, %xmm1, %xmm0
 ; X64-NEXT:    retq
   %1 = and <2 x i64> %a0, <i64 65535, i64 -1>
   %2 = extractelement <2 x i64> %1, i32 0

Modified: llvm/trunk/test/CodeGen/X86/known-signbits-vector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/known-signbits-vector.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/known-signbits-vector.ll (original)
+++ llvm/trunk/test/CodeGen/X86/known-signbits-vector.ll Mon May  6 14:39:51 2019
@@ -69,7 +69,7 @@ define float @signbits_ashr_extract_sito
 ; X64:       # %bb.0:
 ; X64-NEXT:    vmovq %xmm0, %rax
 ; X64-NEXT:    shrq $32, %rax
-; X64-NEXT:    vcvtsi2ssl %eax, %xmm1, %xmm0
+; X64-NEXT:    vcvtsi2ss %eax, %xmm1, %xmm0
 ; X64-NEXT:    retq
   %1 = ashr <2 x i64> %a0, <i64 32, i64 32>
   %2 = extractelement <2 x i64> %1, i32 0
@@ -95,7 +95,7 @@ define float @signbits_ashr_extract_sito
 ; X64:       # %bb.0:
 ; X64-NEXT:    vmovq %xmm0, %rax
 ; X64-NEXT:    shrq $32, %rax
-; X64-NEXT:    vcvtsi2ssl %eax, %xmm1, %xmm0
+; X64-NEXT:    vcvtsi2ss %eax, %xmm1, %xmm0
 ; X64-NEXT:    retq
   %1 = ashr <2 x i64> %a0, <i64 32, i64 63>
   %2 = extractelement <2 x i64> %1, i32 0
@@ -123,7 +123,7 @@ define float @signbits_ashr_shl_extract_
 ; X64-NEXT:    vmovq %xmm0, %rax
 ; X64-NEXT:    sarq $61, %rax
 ; X64-NEXT:    shll $20, %eax
-; X64-NEXT:    vcvtsi2ssl %eax, %xmm1, %xmm0
+; X64-NEXT:    vcvtsi2ss %eax, %xmm1, %xmm0
 ; X64-NEXT:    retq
   %1 = ashr <2 x i64> %a0, <i64 61, i64 60>
   %2 = shl <2 x i64> %1, <i64 20, i64 16>
@@ -153,7 +153,7 @@ define float @signbits_ashr_insert_ashr_
 ; X64:       # %bb.0:
 ; X64-NEXT:    sarq $30, %rdi
 ; X64-NEXT:    shrq $3, %rdi
-; X64-NEXT:    vcvtsi2ssl %edi, %xmm0, %xmm0
+; X64-NEXT:    vcvtsi2ss %edi, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = ashr i64 %a0, 30
   %2 = insertelement <2 x i64> undef, i64 %1, i32 0
@@ -244,7 +244,7 @@ define float @signbits_ashr_sext_sextinr
 ; X64-NEXT:    vmovd %edi, %xmm1
 ; X64-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    vmovq %xmm0, %rax
-; X64-NEXT:    vcvtsi2ssl %eax, %xmm2, %xmm0
+; X64-NEXT:    vcvtsi2ss %eax, %xmm2, %xmm0
 ; X64-NEXT:    retq
   %1 = ashr <2 x i64> %a0, <i64 61, i64 60>
   %2 = sext i32 %a2 to i64
@@ -290,7 +290,7 @@ define float @signbits_ashr_sextvecinreg
 ; X64-NEXT:    vpor %xmm1, %xmm2, %xmm1
 ; X64-NEXT:    vpxor %xmm0, %xmm1, %xmm0
 ; X64-NEXT:    vmovq %xmm0, %rax
-; X64-NEXT:    vcvtsi2ssl %eax, %xmm3, %xmm0
+; X64-NEXT:    vcvtsi2ss %eax, %xmm3, %xmm0
 ; X64-NEXT:    retq
   %1 = ashr <2 x i64> %a0, <i64 61, i64 60>
   %2 = shufflevector <4 x i32> %a1, <4 x i32> undef, <2 x i32> <i32 0, i32 1>

Modified: llvm/trunk/test/CodeGen/X86/pr37879.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/pr37879.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/pr37879.ll (original)
+++ llvm/trunk/test/CodeGen/X86/pr37879.ll Mon May  6 14:39:51 2019
@@ -5,7 +5,7 @@ define double @foo(i32** nocapture reado
 ; CHECK-LABEL: foo:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    movq (%rax), %rax
-; CHECK-NEXT:    vcvtsi2sdq %rax, %xmm0, %xmm1
+; CHECK-NEXT:    vcvtsi2sd %rax, %xmm0, %xmm1
 ; CHECK-NEXT:    kmovd %eax, %k1
 ; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT:    vmovsd %xmm1, %xmm0, %xmm0 {%k1}

Modified: llvm/trunk/test/CodeGen/X86/scalar-int-to-fp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/scalar-int-to-fp.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/scalar-int-to-fp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/scalar-int-to-fp.ll Mon May  6 14:39:51 2019
@@ -25,7 +25,7 @@ define float @u32_to_f(i32 %a) nounwind
 ;
 ; AVX512_64-LABEL: u32_to_f:
 ; AVX512_64:       # %bb.0:
-; AVX512_64-NEXT:    vcvtusi2ssl %edi, %xmm0, %xmm0
+; AVX512_64-NEXT:    vcvtusi2ss %edi, %xmm0, %xmm0
 ; AVX512_64-NEXT:    retq
 ;
 ; SSE2_32-LABEL: u32_to_f:
@@ -45,7 +45,7 @@ define float @u32_to_f(i32 %a) nounwind
 ; SSE2_64-LABEL: u32_to_f:
 ; SSE2_64:       # %bb.0:
 ; SSE2_64-NEXT:    movl %edi, %eax
-; SSE2_64-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2_64-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2_64-NEXT:    retq
 ;
 ; X87-LABEL: u32_to_f:
@@ -77,7 +77,7 @@ define float @s32_to_f(i32 %a) nounwind
 ;
 ; AVX512_64-LABEL: s32_to_f:
 ; AVX512_64:       # %bb.0:
-; AVX512_64-NEXT:    vcvtsi2ssl %edi, %xmm0, %xmm0
+; AVX512_64-NEXT:    vcvtsi2ss %edi, %xmm0, %xmm0
 ; AVX512_64-NEXT:    retq
 ;
 ; SSE2_32-LABEL: s32_to_f:
@@ -91,7 +91,7 @@ define float @s32_to_f(i32 %a) nounwind
 ;
 ; SSE2_64-LABEL: s32_to_f:
 ; SSE2_64:       # %bb.0:
-; SSE2_64-NEXT:    cvtsi2ssl %edi, %xmm0
+; SSE2_64-NEXT:    cvtsi2ss %edi, %xmm0
 ; SSE2_64-NEXT:    retq
 ;
 ; X87-LABEL: s32_to_f:
@@ -122,7 +122,7 @@ define double @u32_to_d(i32 %a) nounwind
 ;
 ; AVX512_64-LABEL: u32_to_d:
 ; AVX512_64:       # %bb.0:
-; AVX512_64-NEXT:    vcvtusi2sdl %edi, %xmm0, %xmm0
+; AVX512_64-NEXT:    vcvtusi2sd %edi, %xmm0, %xmm0
 ; AVX512_64-NEXT:    retq
 ;
 ; SSE2_32-LABEL: u32_to_d:
@@ -144,7 +144,7 @@ define double @u32_to_d(i32 %a) nounwind
 ; SSE2_64-LABEL: u32_to_d:
 ; SSE2_64:       # %bb.0:
 ; SSE2_64-NEXT:    movl %edi, %eax
-; SSE2_64-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE2_64-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE2_64-NEXT:    retq
 ;
 ; X87-LABEL: u32_to_d:
@@ -180,7 +180,7 @@ define double @s32_to_d(i32 %a) nounwind
 ;
 ; AVX512_64-LABEL: s32_to_d:
 ; AVX512_64:       # %bb.0:
-; AVX512_64-NEXT:    vcvtsi2sdl %edi, %xmm0, %xmm0
+; AVX512_64-NEXT:    vcvtsi2sd %edi, %xmm0, %xmm0
 ; AVX512_64-NEXT:    retq
 ;
 ; SSE2_32-LABEL: s32_to_d:
@@ -198,7 +198,7 @@ define double @s32_to_d(i32 %a) nounwind
 ;
 ; SSE2_64-LABEL: s32_to_d:
 ; SSE2_64:       # %bb.0:
-; SSE2_64-NEXT:    cvtsi2sdl %edi, %xmm0
+; SSE2_64-NEXT:    cvtsi2sd %edi, %xmm0
 ; SSE2_64-NEXT:    retq
 ;
 ; X87-LABEL: s32_to_d:
@@ -313,7 +313,7 @@ define float @u64_to_f(i64 %a) nounwind
 ;
 ; AVX512_64-LABEL: u64_to_f:
 ; AVX512_64:       # %bb.0:
-; AVX512_64-NEXT:    vcvtusi2ssq %rdi, %xmm0, %xmm0
+; AVX512_64-NEXT:    vcvtusi2ss %rdi, %xmm0, %xmm0
 ; AVX512_64-NEXT:    retq
 ;
 ; AVX512DQ_32-LABEL: u64_to_f:
@@ -374,14 +374,14 @@ define float @u64_to_f(i64 %a) nounwind
 ; SSE2_64-NEXT:    testq %rdi, %rdi
 ; SSE2_64-NEXT:    js .LBB6_1
 ; SSE2_64-NEXT:  # %bb.2:
-; SSE2_64-NEXT:    cvtsi2ssq %rdi, %xmm0
+; SSE2_64-NEXT:    cvtsi2ss %rdi, %xmm0
 ; SSE2_64-NEXT:    retq
 ; SSE2_64-NEXT:  .LBB6_1:
 ; SSE2_64-NEXT:    movq %rdi, %rax
 ; SSE2_64-NEXT:    shrq %rax
 ; SSE2_64-NEXT:    andl $1, %edi
 ; SSE2_64-NEXT:    orq %rax, %rdi
-; SSE2_64-NEXT:    cvtsi2ssq %rdi, %xmm0
+; SSE2_64-NEXT:    cvtsi2ss %rdi, %xmm0
 ; SSE2_64-NEXT:    addss %xmm0, %xmm0
 ; SSE2_64-NEXT:    retq
 ;
@@ -423,7 +423,7 @@ define float @s64_to_f(i64 %a) nounwind
 ;
 ; AVX512_64-LABEL: s64_to_f:
 ; AVX512_64:       # %bb.0:
-; AVX512_64-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0
+; AVX512_64-NEXT:    vcvtsi2ss %rdi, %xmm0, %xmm0
 ; AVX512_64-NEXT:    retq
 ;
 ; AVX512DQ_32-LABEL: s64_to_f:
@@ -457,7 +457,7 @@ define float @s64_to_f(i64 %a) nounwind
 ;
 ; SSE2_64-LABEL: s64_to_f:
 ; SSE2_64:       # %bb.0:
-; SSE2_64-NEXT:    cvtsi2ssq %rdi, %xmm0
+; SSE2_64-NEXT:    cvtsi2ss %rdi, %xmm0
 ; SSE2_64-NEXT:    retq
 ;
 ; X87-LABEL: s64_to_f:
@@ -488,7 +488,7 @@ define float @s64_to_f_2(i64 %a) nounwin
 ; AVX512_64-LABEL: s64_to_f_2:
 ; AVX512_64:       # %bb.0:
 ; AVX512_64-NEXT:    addq $5, %rdi
-; AVX512_64-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0
+; AVX512_64-NEXT:    vcvtsi2ss %rdi, %xmm0, %xmm0
 ; AVX512_64-NEXT:    retq
 ;
 ; AVX512DQ_32-LABEL: s64_to_f_2:
@@ -551,7 +551,7 @@ define float @s64_to_f_2(i64 %a) nounwin
 ; SSE2_64-LABEL: s64_to_f_2:
 ; SSE2_64:       # %bb.0:
 ; SSE2_64-NEXT:    addq $5, %rdi
-; SSE2_64-NEXT:    cvtsi2ssq %rdi, %xmm0
+; SSE2_64-NEXT:    cvtsi2ss %rdi, %xmm0
 ; SSE2_64-NEXT:    retq
 ;
 ; X87-LABEL: s64_to_f_2:
@@ -593,7 +593,7 @@ define double @u64_to_d(i64 %a) nounwind
 ;
 ; AVX512_64-LABEL: u64_to_d:
 ; AVX512_64:       # %bb.0:
-; AVX512_64-NEXT:    vcvtusi2sdq %rdi, %xmm0, %xmm0
+; AVX512_64-NEXT:    vcvtusi2sd %rdi, %xmm0, %xmm0
 ; AVX512_64-NEXT:    retq
 ;
 ; AVX512DQ_32-LABEL: u64_to_d:
@@ -697,7 +697,7 @@ define double @s64_to_d(i64 %a) nounwind
 ;
 ; AVX512_64-LABEL: s64_to_d:
 ; AVX512_64:       # %bb.0:
-; AVX512_64-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0
+; AVX512_64-NEXT:    vcvtsi2sd %rdi, %xmm0, %xmm0
 ; AVX512_64-NEXT:    retq
 ;
 ; AVX512DQ_32-LABEL: s64_to_d:
@@ -743,7 +743,7 @@ define double @s64_to_d(i64 %a) nounwind
 ;
 ; SSE2_64-LABEL: s64_to_d:
 ; SSE2_64:       # %bb.0:
-; SSE2_64-NEXT:    cvtsi2sdq %rdi, %xmm0
+; SSE2_64-NEXT:    cvtsi2sd %rdi, %xmm0
 ; SSE2_64-NEXT:    retq
 ;
 ; X87-LABEL: s64_to_d:
@@ -778,7 +778,7 @@ define double @s64_to_d_2(i64 %a) nounwi
 ; AVX512_64-LABEL: s64_to_d_2:
 ; AVX512_64:       # %bb.0:
 ; AVX512_64-NEXT:    addq $5, %rdi
-; AVX512_64-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0
+; AVX512_64-NEXT:    vcvtsi2sd %rdi, %xmm0, %xmm0
 ; AVX512_64-NEXT:    retq
 ;
 ; AVX512DQ_32-LABEL: s64_to_d_2:
@@ -845,7 +845,7 @@ define double @s64_to_d_2(i64 %a) nounwi
 ; SSE2_64-LABEL: s64_to_d_2:
 ; SSE2_64:       # %bb.0:
 ; SSE2_64-NEXT:    addq $5, %rdi
-; SSE2_64-NEXT:    cvtsi2sdq %rdi, %xmm0
+; SSE2_64-NEXT:    cvtsi2sd %rdi, %xmm0
 ; SSE2_64-NEXT:    retq
 ;
 ; X87-LABEL: s64_to_d_2:

Modified: llvm/trunk/test/CodeGen/X86/sse-cvttp2si.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-cvttp2si.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-cvttp2si.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-cvttp2si.ll Mon May  6 14:39:51 2019
@@ -17,13 +17,13 @@ define float @float_to_int_to_float_mem_
 ; SSE-LABEL: float_to_int_to_float_mem_f32_i32:
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    cvttss2si (%rdi), %eax
-; SSE-NEXT:    cvtsi2ssl %eax, %xmm0
+; SSE-NEXT:    cvtsi2ss %eax, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: float_to_int_to_float_mem_f32_i32:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vcvttss2si (%rdi), %eax
-; AVX-NEXT:    vcvtsi2ssl %eax, %xmm0, %xmm0
+; AVX-NEXT:    vcvtsi2ss %eax, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %x = load <4 x float>, <4 x float>* %p, align 16
   %fptosi = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> %x)
@@ -36,13 +36,13 @@ define float @float_to_int_to_float_reg_
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    cvttss2si %xmm0, %eax
 ; SSE-NEXT:    xorps %xmm0, %xmm0
-; SSE-NEXT:    cvtsi2ssl %eax, %xmm0
+; SSE-NEXT:    cvtsi2ss %eax, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: float_to_int_to_float_reg_f32_i32:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vcvttss2si %xmm0, %eax
-; AVX-NEXT:    vcvtsi2ssl %eax, %xmm1, %xmm0
+; AVX-NEXT:    vcvtsi2ss %eax, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %fptosi = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> %x)
   %sitofp = sitofp i32 %fptosi to float
@@ -53,13 +53,13 @@ define float @float_to_int_to_float_mem_
 ; SSE-LABEL: float_to_int_to_float_mem_f32_i64:
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    cvttss2si (%rdi), %rax
-; SSE-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: float_to_int_to_float_mem_f32_i64:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vcvttss2si (%rdi), %rax
-; AVX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm0
+; AVX-NEXT:    vcvtsi2ss %rax, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %x = load <4 x float>, <4 x float>* %p, align 16
   %fptosi = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %x)
@@ -72,13 +72,13 @@ define float @float_to_int_to_float_reg_
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    cvttss2si %xmm0, %rax
 ; SSE-NEXT:    xorps %xmm0, %xmm0
-; SSE-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: float_to_int_to_float_reg_f32_i64:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vcvttss2si %xmm0, %rax
-; AVX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm0
+; AVX-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %fptosi = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %x)
   %sitofp = sitofp i64 %fptosi to float
@@ -89,13 +89,13 @@ define double @float_to_int_to_float_mem
 ; SSE-LABEL: float_to_int_to_float_mem_f64_i32:
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    cvttsd2si (%rdi), %eax
-; SSE-NEXT:    cvtsi2sdl %eax, %xmm0
+; SSE-NEXT:    cvtsi2sd %eax, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: float_to_int_to_float_mem_f64_i32:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vcvttsd2si (%rdi), %eax
-; AVX-NEXT:    vcvtsi2sdl %eax, %xmm0, %xmm0
+; AVX-NEXT:    vcvtsi2sd %eax, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %x = load <2 x double>, <2 x double>* %p, align 16
   %fptosi = tail call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %x)
@@ -108,13 +108,13 @@ define double @float_to_int_to_float_reg
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    cvttsd2si %xmm0, %eax
 ; SSE-NEXT:    xorps %xmm0, %xmm0
-; SSE-NEXT:    cvtsi2sdl %eax, %xmm0
+; SSE-NEXT:    cvtsi2sd %eax, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: float_to_int_to_float_reg_f64_i32:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vcvttsd2si %xmm0, %eax
-; AVX-NEXT:    vcvtsi2sdl %eax, %xmm1, %xmm0
+; AVX-NEXT:    vcvtsi2sd %eax, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %fptosi = tail call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %x)
   %sitofp = sitofp i32 %fptosi to double
@@ -125,13 +125,13 @@ define double @float_to_int_to_float_mem
 ; SSE-LABEL: float_to_int_to_float_mem_f64_i64:
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    cvttsd2si (%rdi), %rax
-; SSE-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: float_to_int_to_float_mem_f64_i64:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vcvttsd2si (%rdi), %rax
-; AVX-NEXT:    vcvtsi2sdq %rax, %xmm0, %xmm0
+; AVX-NEXT:    vcvtsi2sd %rax, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %x = load <2 x double>, <2 x double>* %p, align 16
   %fptosi = tail call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %x)
@@ -144,13 +144,13 @@ define double @float_to_int_to_float_reg
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    cvttsd2si %xmm0, %rax
 ; SSE-NEXT:    xorps %xmm0, %xmm0
-; SSE-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: float_to_int_to_float_reg_f64_i64:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vcvttsd2si %xmm0, %rax
-; AVX-NEXT:    vcvtsi2sdq %rax, %xmm1, %xmm0
+; AVX-NEXT:    vcvtsi2sd %rax, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %fptosi = tail call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %x)
   %sitofp = sitofp i64 %fptosi to double

Modified: llvm/trunk/test/CodeGen/X86/sse-intrinsics-fast-isel-x86_64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-intrinsics-fast-isel-x86_64.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-intrinsics-fast-isel-x86_64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-intrinsics-fast-isel-x86_64.ll Mon May  6 14:39:51 2019
@@ -8,12 +8,12 @@
 define <4 x float> @test_mm_cvtsi64_ss(<4 x float> %a0, i64 %a1) nounwind {
 ; SSE-LABEL: test_mm_cvtsi64_ss:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    cvtsi2ssq %rdi, %xmm0
+; SSE-NEXT:    cvtsi2ss %rdi, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_mm_cvtsi64_ss:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0
+; AVX-NEXT:    vcvtsi2ss %rdi, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %res = call <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float> %a0, i64 %a1)
   ret <4 x float> %res

Modified: llvm/trunk/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-intrinsics-fast-isel.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-intrinsics-fast-isel.ll Mon May  6 14:39:51 2019
@@ -760,17 +760,17 @@ define <4 x float> @test_mm_cvtsi32_ss(<
 ;
 ; X64-SSE-LABEL: test_mm_cvtsi32_ss:
 ; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    cvtsi2ssl %edi, %xmm0 # encoding: [0xf3,0x0f,0x2a,0xc7]
+; X64-SSE-NEXT:    cvtsi2ss %edi, %xmm0 # encoding: [0xf3,0x0f,0x2a,0xc7]
 ; X64-SSE-NEXT:    retq # encoding: [0xc3]
 ;
 ; X64-AVX1-LABEL: test_mm_cvtsi32_ss:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vcvtsi2ssl %edi, %xmm0, %xmm0 # encoding: [0xc5,0xfa,0x2a,0xc7]
+; X64-AVX1-NEXT:    vcvtsi2ss %edi, %xmm0, %xmm0 # encoding: [0xc5,0xfa,0x2a,0xc7]
 ; X64-AVX1-NEXT:    retq # encoding: [0xc3]
 ;
 ; X64-AVX512-LABEL: test_mm_cvtsi32_ss:
 ; X64-AVX512:       # %bb.0:
-; X64-AVX512-NEXT:    vcvtsi2ssl %edi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x2a,0xc7]
+; X64-AVX512-NEXT:    vcvtsi2ss %edi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x2a,0xc7]
 ; X64-AVX512-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse.cvtsi2ss(<4 x float> %a0, i32 %a1)
   ret <4 x float> %res

Modified: llvm/trunk/test/CodeGen/X86/sse-intrinsics-x86-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-intrinsics-x86-upgrade.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-intrinsics-x86-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-intrinsics-x86-upgrade.ll Mon May  6 14:39:51 2019
@@ -190,17 +190,17 @@ define <4 x float> @test_x86_sse_cvtsi2s
 ;
 ; X64-SSE-LABEL: test_x86_sse_cvtsi2ss:
 ; X64-SSE:       ## %bb.0:
-; X64-SSE-NEXT:    cvtsi2ssl %edi, %xmm0 ## encoding: [0xf3,0x0f,0x2a,0xc7]
+; X64-SSE-NEXT:    cvtsi2ss %edi, %xmm0 ## encoding: [0xf3,0x0f,0x2a,0xc7]
 ; X64-SSE-NEXT:    retq ## encoding: [0xc3]
 ;
 ; X64-AVX1-LABEL: test_x86_sse_cvtsi2ss:
 ; X64-AVX1:       ## %bb.0:
-; X64-AVX1-NEXT:    vcvtsi2ssl %edi, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x2a,0xc7]
+; X64-AVX1-NEXT:    vcvtsi2ss %edi, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x2a,0xc7]
 ; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
 ;
 ; X64-AVX512-LABEL: test_x86_sse_cvtsi2ss:
 ; X64-AVX512:       ## %bb.0:
-; X64-AVX512-NEXT:    vcvtsi2ssl %edi, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x2a,0xc7]
+; X64-AVX512-NEXT:    vcvtsi2ss %edi, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x2a,0xc7]
 ; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse.cvtsi2ss(<4 x float> %a0, i32 %a1) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res

Modified: llvm/trunk/test/CodeGen/X86/sse-intrinsics-x86_64-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-intrinsics-x86_64-upgrade.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-intrinsics-x86_64-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-intrinsics-x86_64-upgrade.ll Mon May  6 14:39:51 2019
@@ -6,17 +6,17 @@
 define <4 x float> @test_x86_sse_cvtsi642ss(<4 x float> %a0, i64 %a1) {
 ; SSE-LABEL: test_x86_sse_cvtsi642ss:
 ; SSE:       ## %bb.0:
-; SSE-NEXT:    cvtsi2ssq %rdi, %xmm0 ## encoding: [0xf3,0x48,0x0f,0x2a,0xc7]
+; SSE-NEXT:    cvtsi2ss %rdi, %xmm0 ## encoding: [0xf3,0x48,0x0f,0x2a,0xc7]
 ; SSE-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX1-LABEL: test_x86_sse_cvtsi642ss:
 ; AVX1:       ## %bb.0:
-; AVX1-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0 ## encoding: [0xc4,0xe1,0xfa,0x2a,0xc7]
+; AVX1-NEXT:    vcvtsi2ss %rdi, %xmm0, %xmm0 ## encoding: [0xc4,0xe1,0xfa,0x2a,0xc7]
 ; AVX1-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512-LABEL: test_x86_sse_cvtsi642ss:
 ; AVX512:       ## %bb.0:
-; AVX512-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfa,0x2a,0xc7]
+; AVX512-NEXT:    vcvtsi2ss %rdi, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfa,0x2a,0xc7]
 ; AVX512-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float> %a0, i64 %a1) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res

Modified: llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel-x86_64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel-x86_64.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel-x86_64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel-x86_64.ll Mon May  6 14:39:51 2019
@@ -37,12 +37,12 @@ define i64 @test_mm_cvtsi128_si64(<2 x i
 define <2 x double> @test_mm_cvtsi64_sd(<2 x double> %a0, i64 %a1) nounwind {
 ; SSE-LABEL: test_mm_cvtsi64_sd:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    cvtsi2sdq %rdi, %xmm0
+; SSE-NEXT:    cvtsi2sd %rdi, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_mm_cvtsi64_sd:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0
+; AVX-NEXT:    vcvtsi2sd %rdi, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %res = call <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double> %a0, i64 %a1)
   ret <2 x double> %res

Modified: llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll Mon May  6 14:39:51 2019
@@ -1617,17 +1617,17 @@ define <2 x double> @test_mm_cvtsi32_sd(
 ;
 ; X64-SSE-LABEL: test_mm_cvtsi32_sd:
 ; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    cvtsi2sdl %edi, %xmm0 # encoding: [0xf2,0x0f,0x2a,0xc7]
+; X64-SSE-NEXT:    cvtsi2sd %edi, %xmm0 # encoding: [0xf2,0x0f,0x2a,0xc7]
 ; X64-SSE-NEXT:    retq # encoding: [0xc3]
 ;
 ; X64-AVX1-LABEL: test_mm_cvtsi32_sd:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vcvtsi2sdl %edi, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x2a,0xc7]
+; X64-AVX1-NEXT:    vcvtsi2sd %edi, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x2a,0xc7]
 ; X64-AVX1-NEXT:    retq # encoding: [0xc3]
 ;
 ; X64-AVX512-LABEL: test_mm_cvtsi32_sd:
 ; X64-AVX512:       # %bb.0:
-; X64-AVX512-NEXT:    vcvtsi2sdl %edi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2a,0xc7]
+; X64-AVX512-NEXT:    vcvtsi2sd %edi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2a,0xc7]
 ; X64-AVX512-NEXT:    retq # encoding: [0xc3]
   %cvt = sitofp i32 %a1 to double
   %res = insertelement <2 x double> %a0, double %cvt, i32 0

Modified: llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll Mon May  6 14:39:51 2019
@@ -670,17 +670,17 @@ define <2 x double> @test_x86_sse2_cvtsi
 ;
 ; X64-SSE-LABEL: test_x86_sse2_cvtsi2sd:
 ; X64-SSE:       ## %bb.0:
-; X64-SSE-NEXT:    cvtsi2sdl %edi, %xmm0 ## encoding: [0xf2,0x0f,0x2a,0xc7]
+; X64-SSE-NEXT:    cvtsi2sd %edi, %xmm0 ## encoding: [0xf2,0x0f,0x2a,0xc7]
 ; X64-SSE-NEXT:    retq ## encoding: [0xc3]
 ;
 ; X64-AVX1-LABEL: test_x86_sse2_cvtsi2sd:
 ; X64-AVX1:       ## %bb.0:
-; X64-AVX1-NEXT:    vcvtsi2sdl %edi, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x2a,0xc7]
+; X64-AVX1-NEXT:    vcvtsi2sd %edi, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x2a,0xc7]
 ; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
 ;
 ; X64-AVX512-LABEL: test_x86_sse2_cvtsi2sd:
 ; X64-AVX512:       ## %bb.0:
-; X64-AVX512-NEXT:    vcvtsi2sdl %edi, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2a,0xc7]
+; X64-AVX512-NEXT:    vcvtsi2sd %edi, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2a,0xc7]
 ; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double> %a0, i32 %a1) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
@@ -892,11 +892,19 @@ define <16 x i8> @test_x86_sse2_paddus_b
 ; SSE-NEXT:    paddusb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xdc,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
+; AVX1-LABEL: test_x86_sse2_paddus_b:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdc,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_paddus_b:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdc,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ; AVX2-LABEL: test_x86_sse2_paddus_b:
 ; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdc,0xc1]
 ; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
-;
 ; SKX-LABEL: test_x86_sse2_paddus_b:
 ; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdc,0xc1]
@@ -913,11 +921,19 @@ define <8 x i16> @test_x86_sse2_paddus_w
 ; SSE-NEXT:    paddusw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xdd,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
+; AVX1-LABEL: test_x86_sse2_paddus_w:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdd,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_paddus_w:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdd,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ; AVX2-LABEL: test_x86_sse2_paddus_w:
 ; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdd,0xc1]
 ; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
-;
 ; SKX-LABEL: test_x86_sse2_paddus_w:
 ; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdd,0xc1]
@@ -976,11 +992,19 @@ define <16 x i8> @test_x86_sse2_psubus_b
 ; SSE-NEXT:    psubusb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xd8,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
+; AVX1-LABEL: test_x86_sse2_psubus_b:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd8,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_psubus_b:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd8,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ; AVX2-LABEL: test_x86_sse2_psubus_b:
 ; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd8,0xc1]
 ; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
-;
 ; SKX-LABEL: test_x86_sse2_psubus_b:
 ; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd8,0xc1]
@@ -997,11 +1021,19 @@ define <8 x i16> @test_x86_sse2_psubus_w
 ; SSE-NEXT:    psubusw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xd9,0xc1]
 ; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ;
+; AVX1-LABEL: test_x86_sse2_psubus_w:
+; AVX1:       ## %bb.0:
+; AVX1-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd9,0xc1]
+; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse2_psubus_w:
+; AVX512:       ## %bb.0:
+; AVX512-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd9,0xc1]
+; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
 ; AVX2-LABEL: test_x86_sse2_psubus_w:
 ; AVX2:       ## %bb.0:
 ; AVX2-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd9,0xc1]
 ; AVX2-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
-;
 ; SKX-LABEL: test_x86_sse2_psubus_w:
 ; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd9,0xc1]

Modified: llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86_64-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86_64-upgrade.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86_64-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86_64-upgrade.ll Mon May  6 14:39:51 2019
@@ -6,17 +6,17 @@
 define <2 x double> @test_x86_sse2_cvtsi642sd(<2 x double> %a0, i64 %a1) {
 ; SSE-LABEL: test_x86_sse2_cvtsi642sd:
 ; SSE:       ## %bb.0:
-; SSE-NEXT:    cvtsi2sdq %rdi, %xmm0 ## encoding: [0xf2,0x48,0x0f,0x2a,0xc7]
+; SSE-NEXT:    cvtsi2sd %rdi, %xmm0 ## encoding: [0xf2,0x48,0x0f,0x2a,0xc7]
 ; SSE-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX1-LABEL: test_x86_sse2_cvtsi642sd:
 ; AVX1:       ## %bb.0:
-; AVX1-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0 ## encoding: [0xc4,0xe1,0xfb,0x2a,0xc7]
+; AVX1-NEXT:    vcvtsi2sd %rdi, %xmm0, %xmm0 ## encoding: [0xc4,0xe1,0xfb,0x2a,0xc7]
 ; AVX1-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512-LABEL: test_x86_sse2_cvtsi642sd:
 ; AVX512:       ## %bb.0:
-; AVX512-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfb,0x2a,0xc7]
+; AVX512-NEXT:    vcvtsi2sd %rdi, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfb,0x2a,0xc7]
 ; AVX512-NEXT:    retq ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double> %a0, i64 %a1) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res

Modified: llvm/trunk/test/CodeGen/X86/uint64-to-float.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/uint64-to-float.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/uint64-to-float.ll (original)
+++ llvm/trunk/test/CodeGen/X86/uint64-to-float.ll Mon May  6 14:39:51 2019
@@ -33,14 +33,14 @@ define float @test(i64 %a) nounwind {
 ; X64-NEXT:    testq %rdi, %rdi
 ; X64-NEXT:    js .LBB0_1
 ; X64-NEXT:  # %bb.2: # %entry
-; X64-NEXT:    cvtsi2ssq %rdi, %xmm0
+; X64-NEXT:    cvtsi2ss %rdi, %xmm0
 ; X64-NEXT:    retq
 ; X64-NEXT:  .LBB0_1:
 ; X64-NEXT:    movq %rdi, %rax
 ; X64-NEXT:    shrq %rax
 ; X64-NEXT:    andl $1, %edi
 ; X64-NEXT:    orq %rax, %rdi
-; X64-NEXT:    cvtsi2ssq %rdi, %xmm0
+; X64-NEXT:    cvtsi2ss %rdi, %xmm0
 ; X64-NEXT:    addss %xmm0, %xmm0
 ; X64-NEXT:    retq
 entry:

Modified: llvm/trunk/test/CodeGen/X86/uint_to_fp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/uint_to_fp.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/uint_to_fp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/uint_to_fp.ll Mon May  6 14:39:51 2019
@@ -9,14 +9,14 @@ define void @test(i32 %x, float* %y) nou
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    shrl $23, %ecx
-; X32-NEXT:    cvtsi2ssl %ecx, %xmm0
+; X32-NEXT:    cvtsi2ss %ecx, %xmm0
 ; X32-NEXT:    movss %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test:
 ; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    shrl $23, %edi
-; X64-NEXT:    cvtsi2ssl %edi, %xmm0
+; X64-NEXT:    cvtsi2ss %edi, %xmm0
 ; X64-NEXT:    movss %xmm0, (%rsi)
 ; X64-NEXT:    retq
 entry:

Modified: llvm/trunk/test/CodeGen/X86/vec_int_to_fp-widen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_int_to_fp-widen.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_int_to_fp-widen.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_int_to_fp-widen.ll Mon May  6 14:39:51 2019
@@ -22,11 +22,11 @@ define <2 x double> @sitofp_2i64_to_2f64
 ; SSE2-LABEL: sitofp_2i64_to_2f64:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movq %xmm0, %rax
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm1
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm0, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
 ; SSE2-NEXT:    retq
@@ -34,37 +34,37 @@ define <2 x double> @sitofp_2i64_to_2f64
 ; SSE41-LABEL: sitofp_2i64_to_2f64:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pextrq $1, %xmm0, %rax
-; SSE41-NEXT:    cvtsi2sdq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2sd %rax, %xmm1
 ; SSE41-NEXT:    movq %xmm0, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE41-NEXT:    retq
 ;
 ; VEX-LABEL: sitofp_2i64_to_2f64:
 ; VEX:       # %bb.0:
 ; VEX-NEXT:    vpextrq $1, %xmm0, %rax
-; VEX-NEXT:    vcvtsi2sdq %rax, %xmm1, %xmm1
+; VEX-NEXT:    vcvtsi2sd %rax, %xmm1, %xmm1
 ; VEX-NEXT:    vmovq %xmm0, %rax
-; VEX-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm0
+; VEX-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm0
 ; VEX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; VEX-NEXT:    retq
 ;
 ; AVX512F-LABEL: sitofp_2i64_to_2f64:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2sdq %rax, %xmm1, %xmm1
+; AVX512F-NEXT:    vcvtsi2sd %rax, %xmm1, %xmm1
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm0
+; AVX512F-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm0
 ; AVX512F-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: sitofp_2i64_to_2f64:
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2sdq %rax, %xmm1, %xmm1
+; AVX512VL-NEXT:    vcvtsi2sd %rax, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm0
+; AVX512VL-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm0
 ; AVX512VL-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX512VL-NEXT:    retq
 ;
@@ -232,18 +232,18 @@ define <4 x double> @sitofp_4i64_to_4f64
 ; SSE2-LABEL: sitofp_4i64_to_4f64:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movq %xmm0, %rax
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm2
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm2
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm0, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0]
 ; SSE2-NEXT:    movq %xmm1, %rax
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm3
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm3
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm0, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm3 = xmm3[0],xmm0[0]
 ; SSE2-NEXT:    movaps %xmm2, %xmm0
 ; SSE2-NEXT:    movaps %xmm3, %xmm1
@@ -252,17 +252,17 @@ define <4 x double> @sitofp_4i64_to_4f64
 ; SSE41-LABEL: sitofp_4i64_to_4f64:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pextrq $1, %xmm0, %rax
-; SSE41-NEXT:    cvtsi2sdq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2sd %rax, %xmm2
 ; SSE41-NEXT:    movq %xmm0, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; SSE41-NEXT:    pextrq $1, %xmm1, %rax
 ; SSE41-NEXT:    xorps %xmm2, %xmm2
-; SSE41-NEXT:    cvtsi2sdq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2sd %rax, %xmm2
 ; SSE41-NEXT:    movq %xmm1, %rax
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2sdq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2sd %rax, %xmm1
 ; SSE41-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; SSE41-NEXT:    retq
 ;
@@ -270,14 +270,14 @@ define <4 x double> @sitofp_4i64_to_4f64
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX1-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm2
+; AVX1-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm2
 ; AVX1-NEXT:    vmovq %xmm1, %rax
-; AVX1-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm1
+; AVX1-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm1
 ; AVX1-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; AVX1-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX1-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm2
+; AVX1-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm2
 ; AVX1-NEXT:    vmovq %xmm0, %rax
-; AVX1-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm0
+; AVX1-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm0
 ; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
@@ -286,14 +286,14 @@ define <4 x double> @sitofp_4i64_to_4f64
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX2-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm2
+; AVX2-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm2
 ; AVX2-NEXT:    vmovq %xmm1, %rax
-; AVX2-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm1
+; AVX2-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm1
 ; AVX2-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX2-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm2
+; AVX2-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm2
 ; AVX2-NEXT:    vmovq %xmm0, %rax
-; AVX2-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm0
+; AVX2-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm0
 ; AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
@@ -302,14 +302,14 @@ define <4 x double> @sitofp_4i64_to_4f64
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX512F-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm2
+; AVX512F-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm2
 ; AVX512F-NEXT:    vmovq %xmm1, %rax
-; AVX512F-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm1
+; AVX512F-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm1
 ; AVX512F-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm2
+; AVX512F-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm2
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm0
+; AVX512F-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm0
 ; AVX512F-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX512F-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
@@ -318,14 +318,14 @@ define <4 x double> @sitofp_4i64_to_4f64
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512VL-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX512VL-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm2
+; AVX512VL-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm2
 ; AVX512VL-NEXT:    vmovq %xmm1, %rax
-; AVX512VL-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm1
+; AVX512VL-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm1
 ; AVX512VL-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm2
+; AVX512VL-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm2
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm0
+; AVX512VL-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm0
 ; AVX512VL-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX512VL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512VL-NEXT:    retq
@@ -1161,11 +1161,11 @@ define <4 x float> @sitofp_2i64_to_4f32(
 ; SSE2-LABEL: sitofp_2i64_to_4f32:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movq %xmm0, %rax
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm0, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
 ; SSE2-NEXT:    retq
@@ -1173,43 +1173,43 @@ define <4 x float> @sitofp_2i64_to_4f32(
 ; SSE41-LABEL: sitofp_2i64_to_4f32:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pextrq $1, %xmm0, %rax
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    movq %xmm0, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
 ; SSE41-NEXT:    retq
 ;
 ; VEX-LABEL: sitofp_2i64_to_4f32:
 ; VEX:       # %bb.0:
 ; VEX-NEXT:    vpextrq $1, %xmm0, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; VEX-NEXT:    vmovq %xmm0, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm1
 ; VEX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; VEX-NEXT:    retq
 ;
 ; AVX512F-LABEL: sitofp_2i64_to_4f32:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm1
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm1
 ; AVX512F-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: sitofp_2i64_to_4f32:
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm1
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm1
 ; AVX512VL-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; AVX512VL-NEXT:    retq
 ;
@@ -1236,10 +1236,10 @@ define <4 x float> @sitofp_2i64_to_4f32_
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm1, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    movq %xmm0, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE2-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
 ; SSE2-NEXT:    retq
@@ -1247,10 +1247,10 @@ define <4 x float> @sitofp_2i64_to_4f32_
 ; SSE41-LABEL: sitofp_2i64_to_4f32_zero:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movq %xmm0, %rax
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    pextrq $1, %xmm0, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0],xmm0[0],zero,zero
 ; SSE41-NEXT:    movaps %xmm1, %xmm0
 ; SSE41-NEXT:    retq
@@ -1258,27 +1258,27 @@ define <4 x float> @sitofp_2i64_to_4f32_
 ; VEX-LABEL: sitofp_2i64_to_4f32_zero:
 ; VEX:       # %bb.0:
 ; VEX-NEXT:    vmovq %xmm0, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; VEX-NEXT:    vpextrq $1, %xmm0, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
 ; VEX-NEXT:    retq
 ;
 ; AVX512F-LABEL: sitofp_2i64_to_4f32_zero:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: sitofp_2i64_to_4f32_zero:
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; AVX512VL-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; AVX512VL-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
 ; AVX512VL-NEXT:    retq
@@ -1304,14 +1304,14 @@ define <4 x float> @sitofp_4i64_to_4f32_
 ; SSE2-LABEL: sitofp_4i64_to_4f32_undef:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movq %xmm0, %rax
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm0, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0,0]
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
 ; SSE2-NEXT:    retq
@@ -1319,46 +1319,46 @@ define <4 x float> @sitofp_4i64_to_4f32_
 ; SSE41-LABEL: sitofp_4i64_to_4f32_undef:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pextrq $1, %xmm0, %rax
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    movq %xmm0, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; SSE41-NEXT:    retq
 ;
 ; VEX-LABEL: sitofp_4i64_to_4f32_undef:
 ; VEX:       # %bb.0:
 ; VEX-NEXT:    vpextrq $1, %xmm0, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; VEX-NEXT:    vmovq %xmm0, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm1
 ; VEX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; VEX-NEXT:    retq
 ;
 ; AVX512F-LABEL: sitofp_4i64_to_4f32_undef:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm1
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm1
 ; AVX512F-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: sitofp_4i64_to_4f32_undef:
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm1
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm1
 ; AVX512VL-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; AVX512VL-NEXT:    retq
 ;
@@ -1539,19 +1539,19 @@ define <4 x float> @sitofp_4i64_to_4f32(
 ; SSE2-LABEL: sitofp_4i64_to_4f32:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movq %xmm1, %rax
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm1, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
 ; SSE2-NEXT:    movq %xmm0, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm0, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
@@ -1560,34 +1560,34 @@ define <4 x float> @sitofp_4i64_to_4f32(
 ; SSE41-LABEL: sitofp_4i64_to_4f32:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pextrq $1, %xmm0, %rax
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    movq %xmm0, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
 ; SSE41-NEXT:    movq %xmm1, %rax
 ; SSE41-NEXT:    xorps %xmm2, %xmm2
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
 ; SSE41-NEXT:    pextrq $1, %xmm1, %rax
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: sitofp_4i64_to_4f32:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX1-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX1-NEXT:    vmovq %xmm0, %rax
-; AVX1-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
-; AVX1-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; AVX1-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX1-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1595,16 +1595,16 @@ define <4 x float> @sitofp_4i64_to_4f32(
 ; AVX2-LABEL: sitofp_4i64_to_4f32:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX2-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX2-NEXT:    vmovq %xmm0, %rax
-; AVX2-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX2-NEXT:    vmovq %xmm0, %rax
-; AVX2-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX2-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1612,16 +1612,16 @@ define <4 x float> @sitofp_4i64_to_4f32(
 ; AVX512F-LABEL: sitofp_4i64_to_4f32:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -1629,16 +1629,16 @@ define <4 x float> @sitofp_4i64_to_4f32(
 ; AVX512VL-LABEL: sitofp_4i64_to_4f32:
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
@@ -1831,7 +1831,7 @@ define <4 x float> @uitofp_2i64_to_4f32(
 ; SSE2-NEXT:    js .LBB39_1
 ; SSE2-NEXT:  # %bb.2:
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    jmp .LBB39_3
 ; SSE2-NEXT:  .LBB39_1:
 ; SSE2-NEXT:    movq %rax, %rcx
@@ -1839,7 +1839,7 @@ define <4 x float> @uitofp_2i64_to_4f32(
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    addss %xmm0, %xmm0
 ; SSE2-NEXT:  .LBB39_3:
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
@@ -1848,7 +1848,7 @@ define <4 x float> @uitofp_2i64_to_4f32(
 ; SSE2-NEXT:    js .LBB39_4
 ; SSE2-NEXT:  # %bb.5:
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE2-NEXT:    retq
 ; SSE2-NEXT:  .LBB39_4:
@@ -1857,7 +1857,7 @@ define <4 x float> @uitofp_2i64_to_4f32(
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    addss %xmm1, %xmm1
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE2-NEXT:    retq
@@ -1868,14 +1868,14 @@ define <4 x float> @uitofp_2i64_to_4f32(
 ; SSE41-NEXT:    testq %rax, %rax
 ; SSE41-NEXT:    js .LBB39_1
 ; SSE41-NEXT:  # %bb.2:
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    jmp .LBB39_3
 ; SSE41-NEXT:  .LBB39_1:
 ; SSE41-NEXT:    movq %rax, %rcx
 ; SSE41-NEXT:    shrq %rcx
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    addss %xmm1, %xmm1
 ; SSE41-NEXT:  .LBB39_3:
 ; SSE41-NEXT:    movq %xmm0, %rax
@@ -1883,7 +1883,7 @@ define <4 x float> @uitofp_2i64_to_4f32(
 ; SSE41-NEXT:    js .LBB39_4
 ; SSE41-NEXT:  # %bb.5:
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
 ; SSE41-NEXT:    retq
 ; SSE41-NEXT:  .LBB39_4:
@@ -1892,7 +1892,7 @@ define <4 x float> @uitofp_2i64_to_4f32(
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    addss %xmm0, %xmm0
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
 ; SSE41-NEXT:    retq
@@ -1903,28 +1903,28 @@ define <4 x float> @uitofp_2i64_to_4f32(
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB39_1
 ; VEX-NEXT:  # %bb.2:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; VEX-NEXT:    jmp .LBB39_3
 ; VEX-NEXT:  .LBB39_1:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; VEX-NEXT:    vaddss %xmm1, %xmm1, %xmm1
 ; VEX-NEXT:  .LBB39_3:
 ; VEX-NEXT:    vmovq %xmm0, %rax
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB39_4
 ; VEX-NEXT:  # %bb.5:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; VEX-NEXT:    jmp .LBB39_6
 ; VEX-NEXT:  .LBB39_4:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; VEX-NEXT:    vaddss %xmm0, %xmm0, %xmm0
 ; VEX-NEXT:  .LBB39_6:
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
@@ -1932,7 +1932,7 @@ define <4 x float> @uitofp_2i64_to_4f32(
 ; VEX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; VEX-NEXT:    js .LBB39_8
 ; VEX-NEXT:  # %bb.7:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm1
 ; VEX-NEXT:  .LBB39_8:
 ; VEX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; VEX-NEXT:    retq
@@ -1940,22 +1940,22 @@ define <4 x float> @uitofp_2i64_to_4f32(
 ; AVX512F-LABEL: uitofp_2i64_to_4f32:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm1, %xmm1
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm1, %xmm1
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm0
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm0
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm1
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm1
 ; AVX512F-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: uitofp_2i64_to_4f32:
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm1, %xmm1
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm0
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm0
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm1
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm1
 ; AVX512VL-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; AVX512VL-NEXT:    retq
 ;
@@ -1985,7 +1985,7 @@ define <4 x float> @uitofp_2i64_to_2f32(
 ; SSE2-NEXT:    js .LBB40_1
 ; SSE2-NEXT:  # %bb.2:
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    jmp .LBB40_3
 ; SSE2-NEXT:  .LBB40_1:
 ; SSE2-NEXT:    movq %rax, %rcx
@@ -1993,7 +1993,7 @@ define <4 x float> @uitofp_2i64_to_2f32(
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    addss %xmm1, %xmm1
 ; SSE2-NEXT:  .LBB40_3:
 ; SSE2-NEXT:    movq %xmm0, %rax
@@ -2001,7 +2001,7 @@ define <4 x float> @uitofp_2i64_to_2f32(
 ; SSE2-NEXT:    js .LBB40_4
 ; SSE2-NEXT:  # %bb.5:
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    jmp .LBB40_6
 ; SSE2-NEXT:  .LBB40_4:
 ; SSE2-NEXT:    movq %rax, %rcx
@@ -2009,7 +2009,7 @@ define <4 x float> @uitofp_2i64_to_2f32(
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    addss %xmm0, %xmm0
 ; SSE2-NEXT:  .LBB40_6:
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -2024,7 +2024,7 @@ define <4 x float> @uitofp_2i64_to_2f32(
 ; SSE41-NEXT:    js .LBB40_1
 ; SSE41-NEXT:  # %bb.2:
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    jmp .LBB40_3
 ; SSE41-NEXT:  .LBB40_1:
 ; SSE41-NEXT:    movq %rax, %rcx
@@ -2032,7 +2032,7 @@ define <4 x float> @uitofp_2i64_to_2f32(
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    addss %xmm0, %xmm0
 ; SSE41-NEXT:  .LBB40_3:
 ; SSE41-NEXT:    pextrq $1, %xmm1, %rax
@@ -2040,7 +2040,7 @@ define <4 x float> @uitofp_2i64_to_2f32(
 ; SSE41-NEXT:    js .LBB40_4
 ; SSE41-NEXT:  # %bb.5:
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
 ; SSE41-NEXT:    retq
 ; SSE41-NEXT:  .LBB40_4:
@@ -2049,7 +2049,7 @@ define <4 x float> @uitofp_2i64_to_2f32(
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    addss %xmm1, %xmm1
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
 ; SSE41-NEXT:    retq
@@ -2060,21 +2060,21 @@ define <4 x float> @uitofp_2i64_to_2f32(
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB40_1
 ; VEX-NEXT:  # %bb.2:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; VEX-NEXT:    jmp .LBB40_3
 ; VEX-NEXT:  .LBB40_1:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; VEX-NEXT:    vaddss %xmm1, %xmm1, %xmm1
 ; VEX-NEXT:  .LBB40_3:
 ; VEX-NEXT:    vpextrq $1, %xmm0, %rax
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB40_4
 ; VEX-NEXT:  # %bb.5:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
 ; VEX-NEXT:    retq
 ; VEX-NEXT:  .LBB40_4:
@@ -2082,7 +2082,7 @@ define <4 x float> @uitofp_2i64_to_2f32(
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; VEX-NEXT:    vaddss %xmm0, %xmm0, %xmm0
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
 ; VEX-NEXT:    retq
@@ -2090,18 +2090,18 @@ define <4 x float> @uitofp_2i64_to_2f32(
 ; AVX512F-LABEL: uitofp_2i64_to_2f32:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm1, %xmm1
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm1, %xmm1
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm0
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm0
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: uitofp_2i64_to_2f32:
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm1, %xmm1
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm0
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm0
 ; AVX512VL-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; AVX512VL-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
 ; AVX512VL-NEXT:    retq
@@ -2132,7 +2132,7 @@ define <4 x float> @uitofp_4i64_to_4f32_
 ; SSE2-NEXT:    js .LBB41_1
 ; SSE2-NEXT:  # %bb.2:
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    jmp .LBB41_3
 ; SSE2-NEXT:  .LBB41_1:
 ; SSE2-NEXT:    movq %rax, %rcx
@@ -2140,7 +2140,7 @@ define <4 x float> @uitofp_4i64_to_4f32_
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    addss %xmm0, %xmm0
 ; SSE2-NEXT:  .LBB41_3:
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
@@ -2149,7 +2149,7 @@ define <4 x float> @uitofp_4i64_to_4f32_
 ; SSE2-NEXT:    js .LBB41_4
 ; SSE2-NEXT:  # %bb.5:
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    jmp .LBB41_6
 ; SSE2-NEXT:  .LBB41_4:
 ; SSE2-NEXT:    movq %rax, %rcx
@@ -2157,7 +2157,7 @@ define <4 x float> @uitofp_4i64_to_4f32_
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    addss %xmm1, %xmm1
 ; SSE2-NEXT:  .LBB41_6:
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -2166,7 +2166,7 @@ define <4 x float> @uitofp_4i64_to_4f32_
 ; SSE2-NEXT:    js .LBB41_8
 ; SSE2-NEXT:  # %bb.7:
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:  .LBB41_8:
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; SSE2-NEXT:    retq
@@ -2177,14 +2177,14 @@ define <4 x float> @uitofp_4i64_to_4f32_
 ; SSE41-NEXT:    testq %rax, %rax
 ; SSE41-NEXT:    js .LBB41_1
 ; SSE41-NEXT:  # %bb.2:
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    jmp .LBB41_3
 ; SSE41-NEXT:  .LBB41_1:
 ; SSE41-NEXT:    movq %rax, %rcx
 ; SSE41-NEXT:    shrq %rcx
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    addss %xmm1, %xmm1
 ; SSE41-NEXT:  .LBB41_3:
 ; SSE41-NEXT:    movq %xmm0, %rax
@@ -2192,7 +2192,7 @@ define <4 x float> @uitofp_4i64_to_4f32_
 ; SSE41-NEXT:    js .LBB41_4
 ; SSE41-NEXT:  # %bb.5:
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    jmp .LBB41_6
 ; SSE41-NEXT:  .LBB41_4:
 ; SSE41-NEXT:    movq %rax, %rcx
@@ -2200,7 +2200,7 @@ define <4 x float> @uitofp_4i64_to_4f32_
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    addss %xmm0, %xmm0
 ; SSE41-NEXT:  .LBB41_6:
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
@@ -2209,7 +2209,7 @@ define <4 x float> @uitofp_4i64_to_4f32_
 ; SSE41-NEXT:    js .LBB41_8
 ; SSE41-NEXT:  # %bb.7:
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:  .LBB41_8:
 ; SSE41-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; SSE41-NEXT:    retq
@@ -2220,28 +2220,28 @@ define <4 x float> @uitofp_4i64_to_4f32_
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB41_1
 ; VEX-NEXT:  # %bb.2:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; VEX-NEXT:    jmp .LBB41_3
 ; VEX-NEXT:  .LBB41_1:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; VEX-NEXT:    vaddss %xmm1, %xmm1, %xmm1
 ; VEX-NEXT:  .LBB41_3:
 ; VEX-NEXT:    vmovq %xmm0, %rax
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB41_4
 ; VEX-NEXT:  # %bb.5:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; VEX-NEXT:    jmp .LBB41_6
 ; VEX-NEXT:  .LBB41_4:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; VEX-NEXT:    vaddss %xmm0, %xmm0, %xmm0
 ; VEX-NEXT:  .LBB41_6:
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
@@ -2249,7 +2249,7 @@ define <4 x float> @uitofp_4i64_to_4f32_
 ; VEX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; VEX-NEXT:    js .LBB41_8
 ; VEX-NEXT:  # %bb.7:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm1
 ; VEX-NEXT:  .LBB41_8:
 ; VEX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; VEX-NEXT:    retq
@@ -2257,22 +2257,22 @@ define <4 x float> @uitofp_4i64_to_4f32_
 ; AVX512F-LABEL: uitofp_4i64_to_4f32_undef:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm1, %xmm1
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm1, %xmm1
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm0
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm0
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm1
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm1
 ; AVX512F-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: uitofp_4i64_to_4f32_undef:
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm1, %xmm1
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm0
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm0
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm1
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm1
 ; AVX512VL-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; AVX512VL-NEXT:    retq
 ;
@@ -2514,14 +2514,14 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; SSE2-NEXT:    testq %rax, %rax
 ; SSE2-NEXT:    js .LBB47_1
 ; SSE2-NEXT:  # %bb.2:
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE2-NEXT:    jmp .LBB47_3
 ; SSE2-NEXT:  .LBB47_1:
 ; SSE2-NEXT:    movq %rax, %rcx
 ; SSE2-NEXT:    shrq %rcx
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE2-NEXT:    addss %xmm2, %xmm2
 ; SSE2-NEXT:  .LBB47_3:
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
@@ -2529,14 +2529,14 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; SSE2-NEXT:    testq %rax, %rax
 ; SSE2-NEXT:    js .LBB47_4
 ; SSE2-NEXT:  # %bb.5:
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm3
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm3
 ; SSE2-NEXT:    jmp .LBB47_6
 ; SSE2-NEXT:  .LBB47_4:
 ; SSE2-NEXT:    movq %rax, %rcx
 ; SSE2-NEXT:    shrq %rcx
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm3
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm3
 ; SSE2-NEXT:    addss %xmm3, %xmm3
 ; SSE2-NEXT:  .LBB47_6:
 ; SSE2-NEXT:    movq %xmm0, %rax
@@ -2544,7 +2544,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; SSE2-NEXT:    js .LBB47_7
 ; SSE2-NEXT:  # %bb.8:
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    jmp .LBB47_9
 ; SSE2-NEXT:  .LBB47_7:
 ; SSE2-NEXT:    movq %rax, %rcx
@@ -2552,7 +2552,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    addss %xmm1, %xmm1
 ; SSE2-NEXT:  .LBB47_9:
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
@@ -2562,7 +2562,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; SSE2-NEXT:    js .LBB47_10
 ; SSE2-NEXT:  # %bb.11:
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    jmp .LBB47_12
 ; SSE2-NEXT:  .LBB47_10:
 ; SSE2-NEXT:    movq %rax, %rcx
@@ -2570,7 +2570,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    addss %xmm0, %xmm0
 ; SSE2-NEXT:  .LBB47_12:
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -2584,14 +2584,14 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; SSE41-NEXT:    testq %rax, %rax
 ; SSE41-NEXT:    js .LBB47_1
 ; SSE41-NEXT:  # %bb.2:
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    jmp .LBB47_3
 ; SSE41-NEXT:  .LBB47_1:
 ; SSE41-NEXT:    movq %rax, %rcx
 ; SSE41-NEXT:    shrq %rcx
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    addss %xmm2, %xmm2
 ; SSE41-NEXT:  .LBB47_3:
 ; SSE41-NEXT:    movq %xmm0, %rax
@@ -2599,7 +2599,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; SSE41-NEXT:    js .LBB47_4
 ; SSE41-NEXT:  # %bb.5:
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    jmp .LBB47_6
 ; SSE41-NEXT:  .LBB47_4:
 ; SSE41-NEXT:    movq %rax, %rcx
@@ -2607,7 +2607,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    addss %xmm0, %xmm0
 ; SSE41-NEXT:  .LBB47_6:
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
@@ -2616,7 +2616,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; SSE41-NEXT:    js .LBB47_7
 ; SSE41-NEXT:  # %bb.8:
 ; SSE41-NEXT:    xorps %xmm2, %xmm2
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    jmp .LBB47_9
 ; SSE41-NEXT:  .LBB47_7:
 ; SSE41-NEXT:    movq %rax, %rcx
@@ -2624,7 +2624,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm2, %xmm2
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    addss %xmm2, %xmm2
 ; SSE41-NEXT:  .LBB47_9:
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
@@ -2633,7 +2633,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; SSE41-NEXT:    js .LBB47_10
 ; SSE41-NEXT:  # %bb.11:
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; SSE41-NEXT:    retq
 ; SSE41-NEXT:  .LBB47_10:
@@ -2642,7 +2642,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    addss %xmm1, %xmm1
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; SSE41-NEXT:    retq
@@ -2653,28 +2653,28 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; AVX1-NEXT:    testq %rax, %rax
 ; AVX1-NEXT:    js .LBB47_1
 ; AVX1-NEXT:  # %bb.2:
-; AVX1-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX1-NEXT:    jmp .LBB47_3
 ; AVX1-NEXT:  .LBB47_1:
 ; AVX1-NEXT:    movq %rax, %rcx
 ; AVX1-NEXT:    shrq %rcx
 ; AVX1-NEXT:    andl $1, %eax
 ; AVX1-NEXT:    orq %rcx, %rax
-; AVX1-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX1-NEXT:    vaddss %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:  .LBB47_3:
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    testq %rax, %rax
 ; AVX1-NEXT:    js .LBB47_4
 ; AVX1-NEXT:  # %bb.5:
-; AVX1-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; AVX1-NEXT:    jmp .LBB47_6
 ; AVX1-NEXT:  .LBB47_4:
 ; AVX1-NEXT:    movq %rax, %rcx
 ; AVX1-NEXT:    shrq %rcx
 ; AVX1-NEXT:    andl $1, %eax
 ; AVX1-NEXT:    orq %rcx, %rax
-; AVX1-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; AVX1-NEXT:    vaddss %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:  .LBB47_6:
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
@@ -2683,14 +2683,14 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; AVX1-NEXT:    testq %rax, %rax
 ; AVX1-NEXT:    js .LBB47_7
 ; AVX1-NEXT:  # %bb.8:
-; AVX1-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; AVX1-NEXT:    jmp .LBB47_9
 ; AVX1-NEXT:  .LBB47_7:
 ; AVX1-NEXT:    movq %rax, %rcx
 ; AVX1-NEXT:    shrq %rcx
 ; AVX1-NEXT:    andl $1, %eax
 ; AVX1-NEXT:    orq %rcx, %rax
-; AVX1-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; AVX1-NEXT:    vaddss %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:  .LBB47_9:
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
@@ -2698,7 +2698,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; AVX1-NEXT:    testq %rax, %rax
 ; AVX1-NEXT:    js .LBB47_10
 ; AVX1-NEXT:  # %bb.11:
-; AVX1-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -2707,7 +2707,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; AVX1-NEXT:    shrq %rcx
 ; AVX1-NEXT:    andl $1, %eax
 ; AVX1-NEXT:    orq %rcx, %rax
-; AVX1-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; AVX1-NEXT:    vaddss %xmm0, %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; AVX1-NEXT:    vzeroupper
@@ -2719,28 +2719,28 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; AVX2-NEXT:    testq %rax, %rax
 ; AVX2-NEXT:    js .LBB47_1
 ; AVX2-NEXT:  # %bb.2:
-; AVX2-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX2-NEXT:    jmp .LBB47_3
 ; AVX2-NEXT:  .LBB47_1:
 ; AVX2-NEXT:    movq %rax, %rcx
 ; AVX2-NEXT:    shrq %rcx
 ; AVX2-NEXT:    andl $1, %eax
 ; AVX2-NEXT:    orq %rcx, %rax
-; AVX2-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX2-NEXT:    vaddss %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:  .LBB47_3:
 ; AVX2-NEXT:    vmovq %xmm0, %rax
 ; AVX2-NEXT:    testq %rax, %rax
 ; AVX2-NEXT:    js .LBB47_4
 ; AVX2-NEXT:  # %bb.5:
-; AVX2-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; AVX2-NEXT:    jmp .LBB47_6
 ; AVX2-NEXT:  .LBB47_4:
 ; AVX2-NEXT:    movq %rax, %rcx
 ; AVX2-NEXT:    shrq %rcx
 ; AVX2-NEXT:    andl $1, %eax
 ; AVX2-NEXT:    orq %rcx, %rax
-; AVX2-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; AVX2-NEXT:    vaddss %xmm2, %xmm2, %xmm2
 ; AVX2-NEXT:  .LBB47_6:
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
@@ -2749,14 +2749,14 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; AVX2-NEXT:    testq %rax, %rax
 ; AVX2-NEXT:    js .LBB47_7
 ; AVX2-NEXT:  # %bb.8:
-; AVX2-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; AVX2-NEXT:    jmp .LBB47_9
 ; AVX2-NEXT:  .LBB47_7:
 ; AVX2-NEXT:    movq %rax, %rcx
 ; AVX2-NEXT:    shrq %rcx
 ; AVX2-NEXT:    andl $1, %eax
 ; AVX2-NEXT:    orq %rcx, %rax
-; AVX2-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; AVX2-NEXT:    vaddss %xmm2, %xmm2, %xmm2
 ; AVX2-NEXT:  .LBB47_9:
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
@@ -2764,7 +2764,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; AVX2-NEXT:    testq %rax, %rax
 ; AVX2-NEXT:    js .LBB47_10
 ; AVX2-NEXT:  # %bb.11:
-; AVX2-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -2773,7 +2773,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; AVX2-NEXT:    shrq %rcx
 ; AVX2-NEXT:    andl $1, %eax
 ; AVX2-NEXT:    orq %rcx, %rax
-; AVX2-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; AVX2-NEXT:    vaddss %xmm0, %xmm0, %xmm0
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; AVX2-NEXT:    vzeroupper
@@ -2782,16 +2782,16 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; AVX512F-LABEL: uitofp_4i64_to_4f32:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm1, %xmm1
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm1, %xmm1
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm2
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm2
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm2
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm2
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm0
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm0
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -2799,16 +2799,16 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; AVX512VL-LABEL: uitofp_4i64_to_4f32:
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm1, %xmm1
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm2
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm2
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm2
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm2
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm0
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm0
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
@@ -3074,11 +3074,11 @@ define <2 x double> @sitofp_load_2i64_to
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm1
 ; SSE2-NEXT:    movq %xmm1, %rax
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm1, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm1
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE2-NEXT:    retq
 ;
@@ -3086,10 +3086,10 @@ define <2 x double> @sitofp_load_2i64_to
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movdqa (%rdi), %xmm0
 ; SSE41-NEXT:    pextrq $1, %xmm0, %rax
-; SSE41-NEXT:    cvtsi2sdq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2sd %rax, %xmm1
 ; SSE41-NEXT:    movq %xmm0, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE41-NEXT:    retq
 ;
@@ -3097,9 +3097,9 @@ define <2 x double> @sitofp_load_2i64_to
 ; VEX:       # %bb.0:
 ; VEX-NEXT:    vmovdqa (%rdi), %xmm0
 ; VEX-NEXT:    vpextrq $1, %xmm0, %rax
-; VEX-NEXT:    vcvtsi2sdq %rax, %xmm1, %xmm1
+; VEX-NEXT:    vcvtsi2sd %rax, %xmm1, %xmm1
 ; VEX-NEXT:    vmovq %xmm0, %rax
-; VEX-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm0
+; VEX-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm0
 ; VEX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; VEX-NEXT:    retq
 ;
@@ -3107,9 +3107,9 @@ define <2 x double> @sitofp_load_2i64_to
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2sdq %rax, %xmm1, %xmm1
+; AVX512F-NEXT:    vcvtsi2sd %rax, %xmm1, %xmm1
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm0
+; AVX512F-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm0
 ; AVX512F-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX512F-NEXT:    retq
 ;
@@ -3117,9 +3117,9 @@ define <2 x double> @sitofp_load_2i64_to
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2sdq %rax, %xmm1, %xmm1
+; AVX512VL-NEXT:    vcvtsi2sd %rax, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm0
+; AVX512VL-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm0
 ; AVX512VL-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX512VL-NEXT:    retq
 ;
@@ -3219,19 +3219,19 @@ define <4 x double> @sitofp_load_4i64_to
 ; SSE2-NEXT:    movdqa (%rdi), %xmm1
 ; SSE2-NEXT:    movdqa 16(%rdi), %xmm2
 ; SSE2-NEXT:    movq %xmm1, %rax
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm1, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm1
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE2-NEXT:    movq %xmm2, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm1
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm2, %rax
 ; SSE2-NEXT:    xorps %xmm2, %xmm2
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm2
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm2
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; SSE2-NEXT:    retq
 ;
@@ -3240,17 +3240,17 @@ define <4 x double> @sitofp_load_4i64_to
 ; SSE41-NEXT:    movdqa (%rdi), %xmm0
 ; SSE41-NEXT:    movdqa 16(%rdi), %xmm1
 ; SSE41-NEXT:    pextrq $1, %xmm0, %rax
-; SSE41-NEXT:    cvtsi2sdq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2sd %rax, %xmm2
 ; SSE41-NEXT:    movq %xmm0, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; SSE41-NEXT:    pextrq $1, %xmm1, %rax
 ; SSE41-NEXT:    xorps %xmm2, %xmm2
-; SSE41-NEXT:    cvtsi2sdq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2sd %rax, %xmm2
 ; SSE41-NEXT:    movq %xmm1, %rax
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2sdq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2sd %rax, %xmm1
 ; SSE41-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; SSE41-NEXT:    retq
 ;
@@ -3259,14 +3259,14 @@ define <4 x double> @sitofp_load_4i64_to
 ; VEX-NEXT:    vmovdqa (%rdi), %xmm0
 ; VEX-NEXT:    vmovdqa 16(%rdi), %xmm1
 ; VEX-NEXT:    vpextrq $1, %xmm1, %rax
-; VEX-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm2
+; VEX-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm2
 ; VEX-NEXT:    vmovq %xmm1, %rax
-; VEX-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm1
+; VEX-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm1
 ; VEX-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; VEX-NEXT:    vpextrq $1, %xmm0, %rax
-; VEX-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm2
+; VEX-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm2
 ; VEX-NEXT:    vmovq %xmm0, %rax
-; VEX-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm0
+; VEX-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm0
 ; VEX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; VEX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; VEX-NEXT:    retq
@@ -3276,14 +3276,14 @@ define <4 x double> @sitofp_load_4i64_to
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vmovdqa 16(%rdi), %xmm1
 ; AVX512F-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX512F-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm2
+; AVX512F-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm2
 ; AVX512F-NEXT:    vmovq %xmm1, %rax
-; AVX512F-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm1
+; AVX512F-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm1
 ; AVX512F-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm2
+; AVX512F-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm2
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm0
+; AVX512F-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm0
 ; AVX512F-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX512F-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
@@ -3293,14 +3293,14 @@ define <4 x double> @sitofp_load_4i64_to
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512VL-NEXT:    vmovdqa 16(%rdi), %xmm1
 ; AVX512VL-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX512VL-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm2
+; AVX512VL-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm2
 ; AVX512VL-NEXT:    vmovq %xmm1, %rax
-; AVX512VL-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm1
+; AVX512VL-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm1
 ; AVX512VL-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm2
+; AVX512VL-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm2
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm0
+; AVX512VL-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm0
 ; AVX512VL-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX512VL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512VL-NEXT:    retq
@@ -3902,19 +3902,19 @@ define <4 x float> @sitofp_load_4i64_to_
 ; SSE2-NEXT:    movdqa (%rdi), %xmm1
 ; SSE2-NEXT:    movdqa 16(%rdi), %xmm0
 ; SSE2-NEXT:    movq %xmm0, %rax
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm0, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
 ; SSE2-NEXT:    movq %xmm1, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm1, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; SSE2-NEXT:    retq
@@ -3924,18 +3924,18 @@ define <4 x float> @sitofp_load_4i64_to_
 ; SSE41-NEXT:    movdqa (%rdi), %xmm0
 ; SSE41-NEXT:    movdqa 16(%rdi), %xmm1
 ; SSE41-NEXT:    pextrq $1, %xmm0, %rax
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    movq %xmm0, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
 ; SSE41-NEXT:    movq %xmm1, %rax
 ; SSE41-NEXT:    xorps %xmm2, %xmm2
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
 ; SSE41-NEXT:    pextrq $1, %xmm1, %rax
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; SSE41-NEXT:    retq
 ;
@@ -3944,15 +3944,15 @@ define <4 x float> @sitofp_load_4i64_to_
 ; VEX-NEXT:    vmovdqa (%rdi), %xmm0
 ; VEX-NEXT:    vmovdqa 16(%rdi), %xmm1
 ; VEX-NEXT:    vpextrq $1, %xmm0, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; VEX-NEXT:    vmovq %xmm0, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
 ; VEX-NEXT:    vmovq %xmm1, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
 ; VEX-NEXT:    vpextrq $1, %xmm1, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm1
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; VEX-NEXT:    retq
 ;
@@ -3961,15 +3961,15 @@ define <4 x float> @sitofp_load_4i64_to_
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vmovdqa 16(%rdi), %xmm1
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
 ; AVX512F-NEXT:    vmovq %xmm1, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
 ; AVX512F-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm1
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm1
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; AVX512F-NEXT:    retq
 ;
@@ -3978,15 +3978,15 @@ define <4 x float> @sitofp_load_4i64_to_
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512VL-NEXT:    vmovdqa 16(%rdi), %xmm1
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
 ; AVX512VL-NEXT:    vmovq %xmm1, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
 ; AVX512VL-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm1
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm1
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; AVX512VL-NEXT:    retq
 ;
@@ -4081,36 +4081,36 @@ define <8 x float> @sitofp_load_8i64_to_
 ; SSE2-NEXT:    movdqa 32(%rdi), %xmm2
 ; SSE2-NEXT:    movdqa 48(%rdi), %xmm3
 ; SSE2-NEXT:    movq %xmm0, %rax
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm4
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm4
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm0, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
 ; SSE2-NEXT:    movq %xmm1, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm1, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm4[0]
 ; SSE2-NEXT:    movq %xmm3, %rax
 ; SSE2-NEXT:    xorps %xmm4, %xmm4
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm4
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm4
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm1, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
 ; SSE2-NEXT:    movq %xmm2, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm2, %rax
 ; SSE2-NEXT:    xorps %xmm2, %xmm2
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm4[0]
 ; SSE2-NEXT:    retq
@@ -4122,33 +4122,33 @@ define <8 x float> @sitofp_load_8i64_to_
 ; SSE41-NEXT:    movdqa 32(%rdi), %xmm2
 ; SSE41-NEXT:    movdqa 48(%rdi), %xmm3
 ; SSE41-NEXT:    pextrq $1, %xmm0, %rax
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm4
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm4
 ; SSE41-NEXT:    movq %xmm0, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[2,3]
 ; SSE41-NEXT:    movq %xmm1, %rax
 ; SSE41-NEXT:    xorps %xmm4, %xmm4
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm4
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm4
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],xmm4[0],xmm0[3]
 ; SSE41-NEXT:    pextrq $1, %xmm1, %rax
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; SSE41-NEXT:    pextrq $1, %xmm2, %rax
 ; SSE41-NEXT:    xorps %xmm4, %xmm4
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm4
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm4
 ; SSE41-NEXT:    movq %xmm2, %rax
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[2,3]
 ; SSE41-NEXT:    movq %xmm3, %rax
 ; SSE41-NEXT:    xorps %xmm2, %xmm2
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; SSE41-NEXT:    pextrq $1, %xmm3, %rax
 ; SSE41-NEXT:    xorps %xmm2, %xmm2
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
 ; SSE41-NEXT:    retq
 ;
@@ -4159,26 +4159,26 @@ define <8 x float> @sitofp_load_8i64_to_
 ; VEX-NEXT:    vmovdqa 32(%rdi), %xmm2
 ; VEX-NEXT:    vmovdqa 48(%rdi), %xmm3
 ; VEX-NEXT:    vpextrq $1, %xmm2, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm4, %xmm4
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm4, %xmm4
 ; VEX-NEXT:    vmovq %xmm2, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm2
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm2
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[2,3]
 ; VEX-NEXT:    vmovq %xmm3, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm4
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm4
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm4[0],xmm2[3]
 ; VEX-NEXT:    vpextrq $1, %xmm3, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
 ; VEX-NEXT:    vpextrq $1, %xmm0, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; VEX-NEXT:    vmovq %xmm0, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm0
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
 ; VEX-NEXT:    vmovq %xmm1, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm3[0],xmm0[3]
 ; VEX-NEXT:    vpextrq $1, %xmm1, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm1
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; VEX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; VEX-NEXT:    retq
@@ -4190,26 +4190,26 @@ define <8 x float> @sitofp_load_8i64_to_
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %xmm2
 ; AVX512F-NEXT:    vmovdqa 48(%rdi), %xmm3
 ; AVX512F-NEXT:    vpextrq $1, %xmm2, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm4, %xmm4
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm4, %xmm4
 ; AVX512F-NEXT:    vmovq %xmm2, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm2
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm2
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[2,3]
 ; AVX512F-NEXT:    vmovq %xmm3, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm4
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm4
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm4[0],xmm2[3]
 ; AVX512F-NEXT:    vpextrq $1, %xmm3, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm0
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm0
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
 ; AVX512F-NEXT:    vmovq %xmm1, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm3[0],xmm0[3]
 ; AVX512F-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm1
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm1
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; AVX512F-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
@@ -4221,26 +4221,26 @@ define <8 x float> @sitofp_load_8i64_to_
 ; AVX512VL-NEXT:    vmovdqa 32(%rdi), %xmm2
 ; AVX512VL-NEXT:    vmovdqa 48(%rdi), %xmm3
 ; AVX512VL-NEXT:    vpextrq $1, %xmm2, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm4, %xmm4
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm4, %xmm4
 ; AVX512VL-NEXT:    vmovq %xmm2, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm2
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm2
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[2,3]
 ; AVX512VL-NEXT:    vmovq %xmm3, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm4
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm4
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm4[0],xmm2[3]
 ; AVX512VL-NEXT:    vpextrq $1, %xmm3, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm0
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm0
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
 ; AVX512VL-NEXT:    vmovq %xmm1, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm3[0],xmm0[3]
 ; AVX512VL-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm1
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm1
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; AVX512VL-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX512VL-NEXT:    retq
@@ -4377,14 +4377,14 @@ define <4 x float> @uitofp_load_4i64_to_
 ; SSE2-NEXT:    testq %rax, %rax
 ; SSE2-NEXT:    js .LBB76_1
 ; SSE2-NEXT:  # %bb.2:
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    jmp .LBB76_3
 ; SSE2-NEXT:  .LBB76_1:
 ; SSE2-NEXT:    movq %rax, %rcx
 ; SSE2-NEXT:    shrq %rcx
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    addss %xmm1, %xmm1
 ; SSE2-NEXT:  .LBB76_3:
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -4392,14 +4392,14 @@ define <4 x float> @uitofp_load_4i64_to_
 ; SSE2-NEXT:    testq %rax, %rax
 ; SSE2-NEXT:    js .LBB76_4
 ; SSE2-NEXT:  # %bb.5:
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm3
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm3
 ; SSE2-NEXT:    jmp .LBB76_6
 ; SSE2-NEXT:  .LBB76_4:
 ; SSE2-NEXT:    movq %rax, %rcx
 ; SSE2-NEXT:    shrq %rcx
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm3
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm3
 ; SSE2-NEXT:    addss %xmm3, %xmm3
 ; SSE2-NEXT:  .LBB76_6:
 ; SSE2-NEXT:    movq %xmm2, %rax
@@ -4407,7 +4407,7 @@ define <4 x float> @uitofp_load_4i64_to_
 ; SSE2-NEXT:    js .LBB76_7
 ; SSE2-NEXT:  # %bb.8:
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    jmp .LBB76_9
 ; SSE2-NEXT:  .LBB76_7:
 ; SSE2-NEXT:    movq %rax, %rcx
@@ -4415,7 +4415,7 @@ define <4 x float> @uitofp_load_4i64_to_
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    addss %xmm0, %xmm0
 ; SSE2-NEXT:  .LBB76_9:
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
@@ -4425,7 +4425,7 @@ define <4 x float> @uitofp_load_4i64_to_
 ; SSE2-NEXT:    js .LBB76_10
 ; SSE2-NEXT:  # %bb.11:
 ; SSE2-NEXT:    xorps %xmm2, %xmm2
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE2-NEXT:    jmp .LBB76_12
 ; SSE2-NEXT:  .LBB76_10:
 ; SSE2-NEXT:    movq %rax, %rcx
@@ -4433,7 +4433,7 @@ define <4 x float> @uitofp_load_4i64_to_
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm2, %xmm2
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE2-NEXT:    addss %xmm2, %xmm2
 ; SSE2-NEXT:  .LBB76_12:
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
@@ -4448,14 +4448,14 @@ define <4 x float> @uitofp_load_4i64_to_
 ; SSE41-NEXT:    testq %rax, %rax
 ; SSE41-NEXT:    js .LBB76_1
 ; SSE41-NEXT:  # %bb.2:
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    jmp .LBB76_3
 ; SSE41-NEXT:  .LBB76_1:
 ; SSE41-NEXT:    movq %rax, %rcx
 ; SSE41-NEXT:    shrq %rcx
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    addss %xmm2, %xmm2
 ; SSE41-NEXT:  .LBB76_3:
 ; SSE41-NEXT:    movq %xmm0, %rax
@@ -4463,7 +4463,7 @@ define <4 x float> @uitofp_load_4i64_to_
 ; SSE41-NEXT:    js .LBB76_4
 ; SSE41-NEXT:  # %bb.5:
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    jmp .LBB76_6
 ; SSE41-NEXT:  .LBB76_4:
 ; SSE41-NEXT:    movq %rax, %rcx
@@ -4471,7 +4471,7 @@ define <4 x float> @uitofp_load_4i64_to_
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    addss %xmm0, %xmm0
 ; SSE41-NEXT:  .LBB76_6:
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
@@ -4480,7 +4480,7 @@ define <4 x float> @uitofp_load_4i64_to_
 ; SSE41-NEXT:    js .LBB76_7
 ; SSE41-NEXT:  # %bb.8:
 ; SSE41-NEXT:    xorps %xmm2, %xmm2
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    jmp .LBB76_9
 ; SSE41-NEXT:  .LBB76_7:
 ; SSE41-NEXT:    movq %rax, %rcx
@@ -4488,7 +4488,7 @@ define <4 x float> @uitofp_load_4i64_to_
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm2, %xmm2
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    addss %xmm2, %xmm2
 ; SSE41-NEXT:  .LBB76_9:
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
@@ -4497,7 +4497,7 @@ define <4 x float> @uitofp_load_4i64_to_
 ; SSE41-NEXT:    js .LBB76_10
 ; SSE41-NEXT:  # %bb.11:
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; SSE41-NEXT:    retq
 ; SSE41-NEXT:  .LBB76_10:
@@ -4506,7 +4506,7 @@ define <4 x float> @uitofp_load_4i64_to_
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    addss %xmm1, %xmm1
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; SSE41-NEXT:    retq
@@ -4519,28 +4519,28 @@ define <4 x float> @uitofp_load_4i64_to_
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB76_1
 ; VEX-NEXT:  # %bb.2:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; VEX-NEXT:    jmp .LBB76_3
 ; VEX-NEXT:  .LBB76_1:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; VEX-NEXT:    vaddss %xmm1, %xmm1, %xmm1
 ; VEX-NEXT:  .LBB76_3:
 ; VEX-NEXT:    vmovq %xmm2, %rax
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB76_4
 ; VEX-NEXT:  # %bb.5:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; VEX-NEXT:    jmp .LBB76_6
 ; VEX-NEXT:  .LBB76_4:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; VEX-NEXT:    vaddss %xmm2, %xmm2, %xmm2
 ; VEX-NEXT:  .LBB76_6:
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
@@ -4548,14 +4548,14 @@ define <4 x float> @uitofp_load_4i64_to_
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB76_7
 ; VEX-NEXT:  # %bb.8:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; VEX-NEXT:    jmp .LBB76_9
 ; VEX-NEXT:  .LBB76_7:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; VEX-NEXT:    vaddss %xmm2, %xmm2, %xmm2
 ; VEX-NEXT:  .LBB76_9:
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
@@ -4563,7 +4563,7 @@ define <4 x float> @uitofp_load_4i64_to_
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB76_10
 ; VEX-NEXT:  # %bb.11:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; VEX-NEXT:    retq
 ; VEX-NEXT:  .LBB76_10:
@@ -4571,7 +4571,7 @@ define <4 x float> @uitofp_load_4i64_to_
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; VEX-NEXT:    vaddss %xmm0, %xmm0, %xmm0
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; VEX-NEXT:    retq
@@ -4581,15 +4581,15 @@ define <4 x float> @uitofp_load_4i64_to_
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vmovdqa 16(%rdi), %xmm1
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm2
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm2
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm0
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm0
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
 ; AVX512F-NEXT:    vmovq %xmm1, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm2
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm2
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
 ; AVX512F-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm1
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm1
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; AVX512F-NEXT:    retq
 ;
@@ -4598,15 +4598,15 @@ define <4 x float> @uitofp_load_4i64_to_
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512VL-NEXT:    vmovdqa 16(%rdi), %xmm1
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm2
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm2
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm0
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm0
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
 ; AVX512VL-NEXT:    vmovq %xmm1, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm2
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm2
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
 ; AVX512VL-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm1
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm1
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; AVX512VL-NEXT:    retq
 ;
@@ -4766,14 +4766,14 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE2-NEXT:    testq %rax, %rax
 ; SSE2-NEXT:    js .LBB80_1
 ; SSE2-NEXT:  # %bb.2:
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm3
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm3
 ; SSE2-NEXT:    jmp .LBB80_3
 ; SSE2-NEXT:  .LBB80_1:
 ; SSE2-NEXT:    movq %rax, %rcx
 ; SSE2-NEXT:    shrq %rcx
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm3
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm3
 ; SSE2-NEXT:    addss %xmm3, %xmm3
 ; SSE2-NEXT:  .LBB80_3:
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -4781,14 +4781,14 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE2-NEXT:    testq %rax, %rax
 ; SSE2-NEXT:    js .LBB80_4
 ; SSE2-NEXT:  # %bb.5:
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm4
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm4
 ; SSE2-NEXT:    jmp .LBB80_6
 ; SSE2-NEXT:  .LBB80_4:
 ; SSE2-NEXT:    movq %rax, %rcx
 ; SSE2-NEXT:    shrq %rcx
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm4
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm4
 ; SSE2-NEXT:    addss %xmm4, %xmm4
 ; SSE2-NEXT:  .LBB80_6:
 ; SSE2-NEXT:    movq %xmm5, %rax
@@ -4796,7 +4796,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE2-NEXT:    js .LBB80_7
 ; SSE2-NEXT:  # %bb.8:
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    jmp .LBB80_9
 ; SSE2-NEXT:  .LBB80_7:
 ; SSE2-NEXT:    movq %rax, %rcx
@@ -4804,7 +4804,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    addss %xmm0, %xmm0
 ; SSE2-NEXT:  .LBB80_9:
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[2,3,0,1]
@@ -4812,14 +4812,14 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE2-NEXT:    testq %rax, %rax
 ; SSE2-NEXT:    js .LBB80_10
 ; SSE2-NEXT:  # %bb.11:
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm6
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm6
 ; SSE2-NEXT:    jmp .LBB80_12
 ; SSE2-NEXT:  .LBB80_10:
 ; SSE2-NEXT:    movq %rax, %rcx
 ; SSE2-NEXT:    shrq %rcx
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm6
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm6
 ; SSE2-NEXT:    addss %xmm6, %xmm6
 ; SSE2-NEXT:  .LBB80_12:
 ; SSE2-NEXT:    movq %xmm1, %rax
@@ -4827,7 +4827,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE2-NEXT:    js .LBB80_13
 ; SSE2-NEXT:  # %bb.14:
 ; SSE2-NEXT:    xorps %xmm5, %xmm5
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm5
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm5
 ; SSE2-NEXT:    jmp .LBB80_15
 ; SSE2-NEXT:  .LBB80_13:
 ; SSE2-NEXT:    movq %rax, %rcx
@@ -4835,7 +4835,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm5, %xmm5
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm5
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm5
 ; SSE2-NEXT:    addss %xmm5, %xmm5
 ; SSE2-NEXT:  .LBB80_15:
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
@@ -4843,14 +4843,14 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE2-NEXT:    testq %rax, %rax
 ; SSE2-NEXT:    js .LBB80_16
 ; SSE2-NEXT:  # %bb.17:
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm7
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm7
 ; SSE2-NEXT:    jmp .LBB80_18
 ; SSE2-NEXT:  .LBB80_16:
 ; SSE2-NEXT:    movq %rax, %rcx
 ; SSE2-NEXT:    shrq %rcx
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm7
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm7
 ; SSE2-NEXT:    addss %xmm7, %xmm7
 ; SSE2-NEXT:  .LBB80_18:
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
@@ -4860,7 +4860,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE2-NEXT:    js .LBB80_19
 ; SSE2-NEXT:  # %bb.20:
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    jmp .LBB80_21
 ; SSE2-NEXT:  .LBB80_19:
 ; SSE2-NEXT:    movq %rax, %rcx
@@ -4868,7 +4868,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    addss %xmm1, %xmm1
 ; SSE2-NEXT:  .LBB80_21:
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0]
@@ -4879,7 +4879,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE2-NEXT:    js .LBB80_22
 ; SSE2-NEXT:  # %bb.23:
 ; SSE2-NEXT:    xorps %xmm2, %xmm2
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE2-NEXT:    jmp .LBB80_24
 ; SSE2-NEXT:  .LBB80_22:
 ; SSE2-NEXT:    movq %rax, %rcx
@@ -4887,7 +4887,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm2, %xmm2
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE2-NEXT:    addss %xmm2, %xmm2
 ; SSE2-NEXT:  .LBB80_24:
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
@@ -4904,14 +4904,14 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE41-NEXT:    testq %rax, %rax
 ; SSE41-NEXT:    js .LBB80_1
 ; SSE41-NEXT:  # %bb.2:
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm3
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm3
 ; SSE41-NEXT:    jmp .LBB80_3
 ; SSE41-NEXT:  .LBB80_1:
 ; SSE41-NEXT:    movq %rax, %rcx
 ; SSE41-NEXT:    shrq %rcx
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm3
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm3
 ; SSE41-NEXT:    addss %xmm3, %xmm3
 ; SSE41-NEXT:  .LBB80_3:
 ; SSE41-NEXT:    movq %xmm0, %rax
@@ -4919,7 +4919,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE41-NEXT:    js .LBB80_4
 ; SSE41-NEXT:  # %bb.5:
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    jmp .LBB80_6
 ; SSE41-NEXT:  .LBB80_4:
 ; SSE41-NEXT:    movq %rax, %rcx
@@ -4927,21 +4927,21 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    addss %xmm0, %xmm0
 ; SSE41-NEXT:  .LBB80_6:
 ; SSE41-NEXT:    movq %xmm4, %rax
 ; SSE41-NEXT:    testq %rax, %rax
 ; SSE41-NEXT:    js .LBB80_7
 ; SSE41-NEXT:  # %bb.8:
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm5
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm5
 ; SSE41-NEXT:    jmp .LBB80_9
 ; SSE41-NEXT:  .LBB80_7:
 ; SSE41-NEXT:    movq %rax, %rcx
 ; SSE41-NEXT:    shrq %rcx
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm5
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm5
 ; SSE41-NEXT:    addss %xmm5, %xmm5
 ; SSE41-NEXT:  .LBB80_9:
 ; SSE41-NEXT:    pextrq $1, %xmm4, %rax
@@ -4949,7 +4949,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE41-NEXT:    js .LBB80_10
 ; SSE41-NEXT:  # %bb.11:
 ; SSE41-NEXT:    xorps %xmm4, %xmm4
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm4
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm4
 ; SSE41-NEXT:    jmp .LBB80_12
 ; SSE41-NEXT:  .LBB80_10:
 ; SSE41-NEXT:    movq %rax, %rcx
@@ -4957,21 +4957,21 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm4, %xmm4
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm4
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm4
 ; SSE41-NEXT:    addss %xmm4, %xmm4
 ; SSE41-NEXT:  .LBB80_12:
 ; SSE41-NEXT:    pextrq $1, %xmm1, %rax
 ; SSE41-NEXT:    testq %rax, %rax
 ; SSE41-NEXT:    js .LBB80_13
 ; SSE41-NEXT:  # %bb.14:
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm6
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm6
 ; SSE41-NEXT:    jmp .LBB80_15
 ; SSE41-NEXT:  .LBB80_13:
 ; SSE41-NEXT:    movq %rax, %rcx
 ; SSE41-NEXT:    shrq %rcx
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm6
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm6
 ; SSE41-NEXT:    addss %xmm6, %xmm6
 ; SSE41-NEXT:  .LBB80_15:
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
@@ -4980,7 +4980,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE41-NEXT:    js .LBB80_16
 ; SSE41-NEXT:  # %bb.17:
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    jmp .LBB80_18
 ; SSE41-NEXT:  .LBB80_16:
 ; SSE41-NEXT:    movq %rax, %rcx
@@ -4988,7 +4988,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    addss %xmm1, %xmm1
 ; SSE41-NEXT:  .LBB80_18:
 ; SSE41-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[2,3]
@@ -4998,7 +4998,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE41-NEXT:    js .LBB80_19
 ; SSE41-NEXT:  # %bb.20:
 ; SSE41-NEXT:    xorps %xmm3, %xmm3
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm3
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm3
 ; SSE41-NEXT:    jmp .LBB80_21
 ; SSE41-NEXT:  .LBB80_19:
 ; SSE41-NEXT:    movq %rax, %rcx
@@ -5006,7 +5006,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm3, %xmm3
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm3
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm3
 ; SSE41-NEXT:    addss %xmm3, %xmm3
 ; SSE41-NEXT:  .LBB80_21:
 ; SSE41-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
@@ -5016,7 +5016,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE41-NEXT:    js .LBB80_22
 ; SSE41-NEXT:  # %bb.23:
 ; SSE41-NEXT:    xorps %xmm2, %xmm2
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
 ; SSE41-NEXT:    retq
 ; SSE41-NEXT:  .LBB80_22:
@@ -5025,7 +5025,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm2, %xmm2
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    addss %xmm2, %xmm2
 ; SSE41-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
 ; SSE41-NEXT:    retq
@@ -5040,70 +5040,70 @@ define <8 x float> @uitofp_load_8i64_to_
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB80_1
 ; VEX-NEXT:  # %bb.2:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; VEX-NEXT:    jmp .LBB80_3
 ; VEX-NEXT:  .LBB80_1:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; VEX-NEXT:    vaddss %xmm2, %xmm2, %xmm2
 ; VEX-NEXT:  .LBB80_3:
 ; VEX-NEXT:    vmovq %xmm4, %rax
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB80_4
 ; VEX-NEXT:  # %bb.5:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm5
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm5
 ; VEX-NEXT:    jmp .LBB80_6
 ; VEX-NEXT:  .LBB80_4:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm4
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm4
 ; VEX-NEXT:    vaddss %xmm4, %xmm4, %xmm5
 ; VEX-NEXT:  .LBB80_6:
 ; VEX-NEXT:    vmovq %xmm3, %rax
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB80_7
 ; VEX-NEXT:  # %bb.8:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm6, %xmm4
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm6, %xmm4
 ; VEX-NEXT:    jmp .LBB80_9
 ; VEX-NEXT:  .LBB80_7:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm6, %xmm4
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm6, %xmm4
 ; VEX-NEXT:    vaddss %xmm4, %xmm4, %xmm4
 ; VEX-NEXT:  .LBB80_9:
 ; VEX-NEXT:    vpextrq $1, %xmm3, %rax
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB80_10
 ; VEX-NEXT:  # %bb.11:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm6, %xmm3
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm6, %xmm3
 ; VEX-NEXT:    jmp .LBB80_12
 ; VEX-NEXT:  .LBB80_10:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm6, %xmm3
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm6, %xmm3
 ; VEX-NEXT:    vaddss %xmm3, %xmm3, %xmm3
 ; VEX-NEXT:  .LBB80_12:
 ; VEX-NEXT:    vpextrq $1, %xmm1, %rax
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB80_13
 ; VEX-NEXT:  # %bb.14:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm6, %xmm6
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm6, %xmm6
 ; VEX-NEXT:    jmp .LBB80_15
 ; VEX-NEXT:  .LBB80_13:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm6, %xmm6
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm6, %xmm6
 ; VEX-NEXT:    vaddss %xmm6, %xmm6, %xmm6
 ; VEX-NEXT:  .LBB80_15:
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm2 = xmm5[0],xmm2[0],xmm5[2,3]
@@ -5111,14 +5111,14 @@ define <8 x float> @uitofp_load_8i64_to_
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB80_16
 ; VEX-NEXT:  # %bb.17:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm7, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm7, %xmm1
 ; VEX-NEXT:    jmp .LBB80_18
 ; VEX-NEXT:  .LBB80_16:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm7, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm7, %xmm1
 ; VEX-NEXT:    vaddss %xmm1, %xmm1, %xmm1
 ; VEX-NEXT:  .LBB80_18:
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm5 = xmm1[0],xmm6[0],xmm1[2,3]
@@ -5127,14 +5127,14 @@ define <8 x float> @uitofp_load_8i64_to_
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB80_19
 ; VEX-NEXT:  # %bb.20:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm7, %xmm2
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm7, %xmm2
 ; VEX-NEXT:    jmp .LBB80_21
 ; VEX-NEXT:  .LBB80_19:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm7, %xmm2
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm7, %xmm2
 ; VEX-NEXT:    vaddss %xmm2, %xmm2, %xmm2
 ; VEX-NEXT:  .LBB80_21:
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm2 = xmm5[0,1],xmm2[0],xmm5[3]
@@ -5143,14 +5143,14 @@ define <8 x float> @uitofp_load_8i64_to_
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB80_22
 ; VEX-NEXT:  # %bb.23:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm7, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm7, %xmm0
 ; VEX-NEXT:    jmp .LBB80_24
 ; VEX-NEXT:  .LBB80_22:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm7, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm7, %xmm0
 ; VEX-NEXT:    vaddss %xmm0, %xmm0, %xmm0
 ; VEX-NEXT:  .LBB80_24:
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
@@ -5164,26 +5164,26 @@ define <8 x float> @uitofp_load_8i64_to_
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %xmm2
 ; AVX512F-NEXT:    vmovdqa 48(%rdi), %xmm3
 ; AVX512F-NEXT:    vpextrq $1, %xmm2, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm4, %xmm4
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm4, %xmm4
 ; AVX512F-NEXT:    vmovq %xmm2, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm2
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm2
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[2,3]
 ; AVX512F-NEXT:    vmovq %xmm3, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm4
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm4
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm4[0],xmm2[3]
 ; AVX512F-NEXT:    vpextrq $1, %xmm3, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm3
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm3
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm3
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm3
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm0
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm0
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
 ; AVX512F-NEXT:    vmovq %xmm1, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm3
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm3
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm3[0],xmm0[3]
 ; AVX512F-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm1
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm1
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; AVX512F-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
@@ -5195,26 +5195,26 @@ define <8 x float> @uitofp_load_8i64_to_
 ; AVX512VL-NEXT:    vmovdqa 32(%rdi), %xmm2
 ; AVX512VL-NEXT:    vmovdqa 48(%rdi), %xmm3
 ; AVX512VL-NEXT:    vpextrq $1, %xmm2, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm4, %xmm4
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm4, %xmm4
 ; AVX512VL-NEXT:    vmovq %xmm2, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm2
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm2
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[2,3]
 ; AVX512VL-NEXT:    vmovq %xmm3, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm4
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm4
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm4[0],xmm2[3]
 ; AVX512VL-NEXT:    vpextrq $1, %xmm3, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm3
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm3
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm3
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm3
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm0
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm0
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
 ; AVX512VL-NEXT:    vmovq %xmm1, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm3
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm3
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm3[0],xmm0[3]
 ; AVX512VL-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm1
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm1
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; AVX512VL-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX512VL-NEXT:    retq
@@ -5492,12 +5492,12 @@ define void @aggregate_sitofp_8i16_to_8f
 define <2 x double> @sitofp_i32_to_2f64(<2 x double> %a0, i32 %a1) nounwind {
 ; SSE-LABEL: sitofp_i32_to_2f64:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    cvtsi2sdl %edi, %xmm0
+; SSE-NEXT:    cvtsi2sd %edi, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sitofp_i32_to_2f64:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vcvtsi2sdl %edi, %xmm0, %xmm0
+; AVX-NEXT:    vcvtsi2sd %edi, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %cvt = sitofp i32 %a1 to double
   %res = insertelement <2 x double> %a0, double %cvt, i32 0
@@ -5507,12 +5507,12 @@ define <2 x double> @sitofp_i32_to_2f64(
 define <4 x float> @sitofp_i32_to_4f32(<4 x float> %a0, i32 %a1) nounwind {
 ; SSE-LABEL: sitofp_i32_to_4f32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    cvtsi2ssl %edi, %xmm0
+; SSE-NEXT:    cvtsi2ss %edi, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sitofp_i32_to_4f32:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vcvtsi2ssl %edi, %xmm0, %xmm0
+; AVX-NEXT:    vcvtsi2ss %edi, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %cvt = sitofp i32 %a1 to float
   %res = insertelement <4 x float> %a0, float %cvt, i32 0
@@ -5522,12 +5522,12 @@ define <4 x float> @sitofp_i32_to_4f32(<
 define <2 x double> @sitofp_i64_to_2f64(<2 x double> %a0, i64 %a1) nounwind {
 ; SSE-LABEL: sitofp_i64_to_2f64:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    cvtsi2sdq %rdi, %xmm0
+; SSE-NEXT:    cvtsi2sd %rdi, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sitofp_i64_to_2f64:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0
+; AVX-NEXT:    vcvtsi2sd %rdi, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %cvt = sitofp i64 %a1 to double
   %res = insertelement <2 x double> %a0, double %cvt, i32 0
@@ -5537,12 +5537,12 @@ define <2 x double> @sitofp_i64_to_2f64(
 define <4 x float> @sitofp_i64_to_4f32(<4 x float> %a0, i64 %a1) nounwind {
 ; SSE-LABEL: sitofp_i64_to_4f32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    cvtsi2ssq %rdi, %xmm0
+; SSE-NEXT:    cvtsi2ss %rdi, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sitofp_i64_to_4f32:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0
+; AVX-NEXT:    vcvtsi2ss %rdi, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %cvt = sitofp i64 %a1 to float
   %res = insertelement <4 x float> %a0, float %cvt, i32 0

Modified: llvm/trunk/test/CodeGen/X86/vec_int_to_fp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_int_to_fp.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_int_to_fp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_int_to_fp.ll Mon May  6 14:39:51 2019
@@ -22,11 +22,11 @@ define <2 x double> @sitofp_2i64_to_2f64
 ; SSE2-LABEL: sitofp_2i64_to_2f64:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movq %xmm0, %rax
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm1
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm0, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
 ; SSE2-NEXT:    retq
@@ -34,37 +34,37 @@ define <2 x double> @sitofp_2i64_to_2f64
 ; SSE41-LABEL: sitofp_2i64_to_2f64:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pextrq $1, %xmm0, %rax
-; SSE41-NEXT:    cvtsi2sdq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2sd %rax, %xmm1
 ; SSE41-NEXT:    movq %xmm0, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE41-NEXT:    retq
 ;
 ; VEX-LABEL: sitofp_2i64_to_2f64:
 ; VEX:       # %bb.0:
 ; VEX-NEXT:    vpextrq $1, %xmm0, %rax
-; VEX-NEXT:    vcvtsi2sdq %rax, %xmm1, %xmm1
+; VEX-NEXT:    vcvtsi2sd %rax, %xmm1, %xmm1
 ; VEX-NEXT:    vmovq %xmm0, %rax
-; VEX-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm0
+; VEX-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm0
 ; VEX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; VEX-NEXT:    retq
 ;
 ; AVX512F-LABEL: sitofp_2i64_to_2f64:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2sdq %rax, %xmm1, %xmm1
+; AVX512F-NEXT:    vcvtsi2sd %rax, %xmm1, %xmm1
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm0
+; AVX512F-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm0
 ; AVX512F-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: sitofp_2i64_to_2f64:
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2sdq %rax, %xmm1, %xmm1
+; AVX512VL-NEXT:    vcvtsi2sd %rax, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm0
+; AVX512VL-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm0
 ; AVX512VL-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX512VL-NEXT:    retq
 ;
@@ -232,18 +232,18 @@ define <4 x double> @sitofp_4i64_to_4f64
 ; SSE2-LABEL: sitofp_4i64_to_4f64:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movq %xmm0, %rax
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm2
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm2
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm0, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0]
 ; SSE2-NEXT:    movq %xmm1, %rax
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm3
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm3
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm0, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm3 = xmm3[0],xmm0[0]
 ; SSE2-NEXT:    movaps %xmm2, %xmm0
 ; SSE2-NEXT:    movaps %xmm3, %xmm1
@@ -252,17 +252,17 @@ define <4 x double> @sitofp_4i64_to_4f64
 ; SSE41-LABEL: sitofp_4i64_to_4f64:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pextrq $1, %xmm0, %rax
-; SSE41-NEXT:    cvtsi2sdq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2sd %rax, %xmm2
 ; SSE41-NEXT:    movq %xmm0, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; SSE41-NEXT:    pextrq $1, %xmm1, %rax
 ; SSE41-NEXT:    xorps %xmm2, %xmm2
-; SSE41-NEXT:    cvtsi2sdq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2sd %rax, %xmm2
 ; SSE41-NEXT:    movq %xmm1, %rax
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2sdq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2sd %rax, %xmm1
 ; SSE41-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; SSE41-NEXT:    retq
 ;
@@ -270,14 +270,14 @@ define <4 x double> @sitofp_4i64_to_4f64
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX1-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm2
+; AVX1-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm2
 ; AVX1-NEXT:    vmovq %xmm1, %rax
-; AVX1-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm1
+; AVX1-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm1
 ; AVX1-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; AVX1-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX1-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm2
+; AVX1-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm2
 ; AVX1-NEXT:    vmovq %xmm0, %rax
-; AVX1-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm0
+; AVX1-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm0
 ; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
@@ -286,14 +286,14 @@ define <4 x double> @sitofp_4i64_to_4f64
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX2-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm2
+; AVX2-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm2
 ; AVX2-NEXT:    vmovq %xmm1, %rax
-; AVX2-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm1
+; AVX2-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm1
 ; AVX2-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX2-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm2
+; AVX2-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm2
 ; AVX2-NEXT:    vmovq %xmm0, %rax
-; AVX2-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm0
+; AVX2-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm0
 ; AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
@@ -302,14 +302,14 @@ define <4 x double> @sitofp_4i64_to_4f64
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX512F-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm2
+; AVX512F-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm2
 ; AVX512F-NEXT:    vmovq %xmm1, %rax
-; AVX512F-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm1
+; AVX512F-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm1
 ; AVX512F-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm2
+; AVX512F-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm2
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm0
+; AVX512F-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm0
 ; AVX512F-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX512F-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
@@ -318,14 +318,14 @@ define <4 x double> @sitofp_4i64_to_4f64
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512VL-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX512VL-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm2
+; AVX512VL-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm2
 ; AVX512VL-NEXT:    vmovq %xmm1, %rax
-; AVX512VL-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm1
+; AVX512VL-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm1
 ; AVX512VL-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm2
+; AVX512VL-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm2
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm0
+; AVX512VL-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm0
 ; AVX512VL-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX512VL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512VL-NEXT:    retq
@@ -1161,11 +1161,11 @@ define <4 x float> @sitofp_2i64_to_4f32(
 ; SSE2-LABEL: sitofp_2i64_to_4f32:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movq %xmm0, %rax
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm0, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
 ; SSE2-NEXT:    retq
@@ -1173,43 +1173,43 @@ define <4 x float> @sitofp_2i64_to_4f32(
 ; SSE41-LABEL: sitofp_2i64_to_4f32:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pextrq $1, %xmm0, %rax
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    movq %xmm0, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
 ; SSE41-NEXT:    retq
 ;
 ; VEX-LABEL: sitofp_2i64_to_4f32:
 ; VEX:       # %bb.0:
 ; VEX-NEXT:    vpextrq $1, %xmm0, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; VEX-NEXT:    vmovq %xmm0, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm1
 ; VEX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; VEX-NEXT:    retq
 ;
 ; AVX512F-LABEL: sitofp_2i64_to_4f32:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm1
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm1
 ; AVX512F-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: sitofp_2i64_to_4f32:
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm1
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm1
 ; AVX512VL-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; AVX512VL-NEXT:    retq
 ;
@@ -1236,10 +1236,10 @@ define <4 x float> @sitofp_2i64_to_4f32_
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm1, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    movq %xmm0, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE2-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
 ; SSE2-NEXT:    retq
@@ -1247,10 +1247,10 @@ define <4 x float> @sitofp_2i64_to_4f32_
 ; SSE41-LABEL: sitofp_2i64_to_4f32_zero:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movq %xmm0, %rax
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    pextrq $1, %xmm0, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0],xmm0[0],zero,zero
 ; SSE41-NEXT:    movaps %xmm1, %xmm0
 ; SSE41-NEXT:    retq
@@ -1258,27 +1258,27 @@ define <4 x float> @sitofp_2i64_to_4f32_
 ; VEX-LABEL: sitofp_2i64_to_4f32_zero:
 ; VEX:       # %bb.0:
 ; VEX-NEXT:    vmovq %xmm0, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; VEX-NEXT:    vpextrq $1, %xmm0, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
 ; VEX-NEXT:    retq
 ;
 ; AVX512F-LABEL: sitofp_2i64_to_4f32_zero:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: sitofp_2i64_to_4f32_zero:
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; AVX512VL-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; AVX512VL-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
 ; AVX512VL-NEXT:    retq
@@ -1304,14 +1304,14 @@ define <4 x float> @sitofp_4i64_to_4f32_
 ; SSE2-LABEL: sitofp_4i64_to_4f32_undef:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movq %xmm0, %rax
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm0, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0,0]
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
 ; SSE2-NEXT:    retq
@@ -1319,46 +1319,46 @@ define <4 x float> @sitofp_4i64_to_4f32_
 ; SSE41-LABEL: sitofp_4i64_to_4f32_undef:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pextrq $1, %xmm0, %rax
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    movq %xmm0, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; SSE41-NEXT:    retq
 ;
 ; VEX-LABEL: sitofp_4i64_to_4f32_undef:
 ; VEX:       # %bb.0:
 ; VEX-NEXT:    vpextrq $1, %xmm0, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; VEX-NEXT:    vmovq %xmm0, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm1
 ; VEX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; VEX-NEXT:    retq
 ;
 ; AVX512F-LABEL: sitofp_4i64_to_4f32_undef:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm1
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm1
 ; AVX512F-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: sitofp_4i64_to_4f32_undef:
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm1
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm1
 ; AVX512VL-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; AVX512VL-NEXT:    retq
 ;
@@ -1539,19 +1539,19 @@ define <4 x float> @sitofp_4i64_to_4f32(
 ; SSE2-LABEL: sitofp_4i64_to_4f32:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movq %xmm1, %rax
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm1, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
 ; SSE2-NEXT:    movq %xmm0, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm0, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; SSE2-NEXT:    movaps %xmm1, %xmm0
@@ -1560,34 +1560,34 @@ define <4 x float> @sitofp_4i64_to_4f32(
 ; SSE41-LABEL: sitofp_4i64_to_4f32:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pextrq $1, %xmm0, %rax
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    movq %xmm0, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
 ; SSE41-NEXT:    movq %xmm1, %rax
 ; SSE41-NEXT:    xorps %xmm2, %xmm2
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
 ; SSE41-NEXT:    pextrq $1, %xmm1, %rax
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: sitofp_4i64_to_4f32:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX1-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX1-NEXT:    vmovq %xmm0, %rax
-; AVX1-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
-; AVX1-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; AVX1-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX1-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1595,16 +1595,16 @@ define <4 x float> @sitofp_4i64_to_4f32(
 ; AVX2-LABEL: sitofp_4i64_to_4f32:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX2-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX2-NEXT:    vmovq %xmm0, %rax
-; AVX2-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX2-NEXT:    vmovq %xmm0, %rax
-; AVX2-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX2-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1612,16 +1612,16 @@ define <4 x float> @sitofp_4i64_to_4f32(
 ; AVX512F-LABEL: sitofp_4i64_to_4f32:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -1629,16 +1629,16 @@ define <4 x float> @sitofp_4i64_to_4f32(
 ; AVX512VL-LABEL: sitofp_4i64_to_4f32:
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
@@ -1831,7 +1831,7 @@ define <4 x float> @uitofp_2i64_to_4f32(
 ; SSE2-NEXT:    js .LBB39_1
 ; SSE2-NEXT:  # %bb.2:
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    jmp .LBB39_3
 ; SSE2-NEXT:  .LBB39_1:
 ; SSE2-NEXT:    movq %rax, %rcx
@@ -1839,7 +1839,7 @@ define <4 x float> @uitofp_2i64_to_4f32(
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    addss %xmm0, %xmm0
 ; SSE2-NEXT:  .LBB39_3:
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
@@ -1848,7 +1848,7 @@ define <4 x float> @uitofp_2i64_to_4f32(
 ; SSE2-NEXT:    js .LBB39_4
 ; SSE2-NEXT:  # %bb.5:
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE2-NEXT:    retq
 ; SSE2-NEXT:  .LBB39_4:
@@ -1857,7 +1857,7 @@ define <4 x float> @uitofp_2i64_to_4f32(
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    addss %xmm1, %xmm1
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE2-NEXT:    retq
@@ -1868,14 +1868,14 @@ define <4 x float> @uitofp_2i64_to_4f32(
 ; SSE41-NEXT:    testq %rax, %rax
 ; SSE41-NEXT:    js .LBB39_1
 ; SSE41-NEXT:  # %bb.2:
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    jmp .LBB39_3
 ; SSE41-NEXT:  .LBB39_1:
 ; SSE41-NEXT:    movq %rax, %rcx
 ; SSE41-NEXT:    shrq %rcx
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    addss %xmm1, %xmm1
 ; SSE41-NEXT:  .LBB39_3:
 ; SSE41-NEXT:    movq %xmm0, %rax
@@ -1883,7 +1883,7 @@ define <4 x float> @uitofp_2i64_to_4f32(
 ; SSE41-NEXT:    js .LBB39_4
 ; SSE41-NEXT:  # %bb.5:
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
 ; SSE41-NEXT:    retq
 ; SSE41-NEXT:  .LBB39_4:
@@ -1892,7 +1892,7 @@ define <4 x float> @uitofp_2i64_to_4f32(
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    addss %xmm0, %xmm0
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
 ; SSE41-NEXT:    retq
@@ -1903,28 +1903,28 @@ define <4 x float> @uitofp_2i64_to_4f32(
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB39_1
 ; VEX-NEXT:  # %bb.2:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; VEX-NEXT:    jmp .LBB39_3
 ; VEX-NEXT:  .LBB39_1:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; VEX-NEXT:    vaddss %xmm1, %xmm1, %xmm1
 ; VEX-NEXT:  .LBB39_3:
 ; VEX-NEXT:    vmovq %xmm0, %rax
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB39_4
 ; VEX-NEXT:  # %bb.5:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; VEX-NEXT:    jmp .LBB39_6
 ; VEX-NEXT:  .LBB39_4:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; VEX-NEXT:    vaddss %xmm0, %xmm0, %xmm0
 ; VEX-NEXT:  .LBB39_6:
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
@@ -1932,7 +1932,7 @@ define <4 x float> @uitofp_2i64_to_4f32(
 ; VEX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; VEX-NEXT:    js .LBB39_8
 ; VEX-NEXT:  # %bb.7:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm1
 ; VEX-NEXT:  .LBB39_8:
 ; VEX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; VEX-NEXT:    retq
@@ -1940,22 +1940,22 @@ define <4 x float> @uitofp_2i64_to_4f32(
 ; AVX512F-LABEL: uitofp_2i64_to_4f32:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm1, %xmm1
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm1, %xmm1
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm0
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm0
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm1
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm1
 ; AVX512F-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: uitofp_2i64_to_4f32:
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm1, %xmm1
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm0
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm0
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm1
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm1
 ; AVX512VL-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; AVX512VL-NEXT:    retq
 ;
@@ -1985,7 +1985,7 @@ define <4 x float> @uitofp_2i64_to_2f32(
 ; SSE2-NEXT:    js .LBB40_1
 ; SSE2-NEXT:  # %bb.2:
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    jmp .LBB40_3
 ; SSE2-NEXT:  .LBB40_1:
 ; SSE2-NEXT:    movq %rax, %rcx
@@ -1993,7 +1993,7 @@ define <4 x float> @uitofp_2i64_to_2f32(
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    addss %xmm1, %xmm1
 ; SSE2-NEXT:  .LBB40_3:
 ; SSE2-NEXT:    movq %xmm0, %rax
@@ -2001,7 +2001,7 @@ define <4 x float> @uitofp_2i64_to_2f32(
 ; SSE2-NEXT:    js .LBB40_4
 ; SSE2-NEXT:  # %bb.5:
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    jmp .LBB40_6
 ; SSE2-NEXT:  .LBB40_4:
 ; SSE2-NEXT:    movq %rax, %rcx
@@ -2009,7 +2009,7 @@ define <4 x float> @uitofp_2i64_to_2f32(
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    addss %xmm0, %xmm0
 ; SSE2-NEXT:  .LBB40_6:
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -2024,7 +2024,7 @@ define <4 x float> @uitofp_2i64_to_2f32(
 ; SSE41-NEXT:    js .LBB40_1
 ; SSE41-NEXT:  # %bb.2:
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    jmp .LBB40_3
 ; SSE41-NEXT:  .LBB40_1:
 ; SSE41-NEXT:    movq %rax, %rcx
@@ -2032,7 +2032,7 @@ define <4 x float> @uitofp_2i64_to_2f32(
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    addss %xmm0, %xmm0
 ; SSE41-NEXT:  .LBB40_3:
 ; SSE41-NEXT:    pextrq $1, %xmm1, %rax
@@ -2040,7 +2040,7 @@ define <4 x float> @uitofp_2i64_to_2f32(
 ; SSE41-NEXT:    js .LBB40_4
 ; SSE41-NEXT:  # %bb.5:
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
 ; SSE41-NEXT:    retq
 ; SSE41-NEXT:  .LBB40_4:
@@ -2049,7 +2049,7 @@ define <4 x float> @uitofp_2i64_to_2f32(
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    addss %xmm1, %xmm1
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
 ; SSE41-NEXT:    retq
@@ -2060,21 +2060,21 @@ define <4 x float> @uitofp_2i64_to_2f32(
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB40_1
 ; VEX-NEXT:  # %bb.2:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; VEX-NEXT:    jmp .LBB40_3
 ; VEX-NEXT:  .LBB40_1:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; VEX-NEXT:    vaddss %xmm1, %xmm1, %xmm1
 ; VEX-NEXT:  .LBB40_3:
 ; VEX-NEXT:    vpextrq $1, %xmm0, %rax
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB40_4
 ; VEX-NEXT:  # %bb.5:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
 ; VEX-NEXT:    retq
 ; VEX-NEXT:  .LBB40_4:
@@ -2082,7 +2082,7 @@ define <4 x float> @uitofp_2i64_to_2f32(
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; VEX-NEXT:    vaddss %xmm0, %xmm0, %xmm0
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
 ; VEX-NEXT:    retq
@@ -2090,18 +2090,18 @@ define <4 x float> @uitofp_2i64_to_2f32(
 ; AVX512F-LABEL: uitofp_2i64_to_2f32:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm1, %xmm1
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm1, %xmm1
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm0
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm0
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: uitofp_2i64_to_2f32:
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm1, %xmm1
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm0
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm0
 ; AVX512VL-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; AVX512VL-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
 ; AVX512VL-NEXT:    retq
@@ -2132,7 +2132,7 @@ define <4 x float> @uitofp_4i64_to_4f32_
 ; SSE2-NEXT:    js .LBB41_1
 ; SSE2-NEXT:  # %bb.2:
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    jmp .LBB41_3
 ; SSE2-NEXT:  .LBB41_1:
 ; SSE2-NEXT:    movq %rax, %rcx
@@ -2140,7 +2140,7 @@ define <4 x float> @uitofp_4i64_to_4f32_
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    addss %xmm0, %xmm0
 ; SSE2-NEXT:  .LBB41_3:
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
@@ -2149,7 +2149,7 @@ define <4 x float> @uitofp_4i64_to_4f32_
 ; SSE2-NEXT:    js .LBB41_4
 ; SSE2-NEXT:  # %bb.5:
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    jmp .LBB41_6
 ; SSE2-NEXT:  .LBB41_4:
 ; SSE2-NEXT:    movq %rax, %rcx
@@ -2157,7 +2157,7 @@ define <4 x float> @uitofp_4i64_to_4f32_
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    addss %xmm1, %xmm1
 ; SSE2-NEXT:  .LBB41_6:
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -2166,7 +2166,7 @@ define <4 x float> @uitofp_4i64_to_4f32_
 ; SSE2-NEXT:    js .LBB41_8
 ; SSE2-NEXT:  # %bb.7:
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:  .LBB41_8:
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; SSE2-NEXT:    retq
@@ -2177,14 +2177,14 @@ define <4 x float> @uitofp_4i64_to_4f32_
 ; SSE41-NEXT:    testq %rax, %rax
 ; SSE41-NEXT:    js .LBB41_1
 ; SSE41-NEXT:  # %bb.2:
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    jmp .LBB41_3
 ; SSE41-NEXT:  .LBB41_1:
 ; SSE41-NEXT:    movq %rax, %rcx
 ; SSE41-NEXT:    shrq %rcx
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    addss %xmm1, %xmm1
 ; SSE41-NEXT:  .LBB41_3:
 ; SSE41-NEXT:    movq %xmm0, %rax
@@ -2192,7 +2192,7 @@ define <4 x float> @uitofp_4i64_to_4f32_
 ; SSE41-NEXT:    js .LBB41_4
 ; SSE41-NEXT:  # %bb.5:
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    jmp .LBB41_6
 ; SSE41-NEXT:  .LBB41_4:
 ; SSE41-NEXT:    movq %rax, %rcx
@@ -2200,7 +2200,7 @@ define <4 x float> @uitofp_4i64_to_4f32_
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    addss %xmm0, %xmm0
 ; SSE41-NEXT:  .LBB41_6:
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
@@ -2209,7 +2209,7 @@ define <4 x float> @uitofp_4i64_to_4f32_
 ; SSE41-NEXT:    js .LBB41_8
 ; SSE41-NEXT:  # %bb.7:
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:  .LBB41_8:
 ; SSE41-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; SSE41-NEXT:    retq
@@ -2220,28 +2220,28 @@ define <4 x float> @uitofp_4i64_to_4f32_
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB41_1
 ; VEX-NEXT:  # %bb.2:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; VEX-NEXT:    jmp .LBB41_3
 ; VEX-NEXT:  .LBB41_1:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; VEX-NEXT:    vaddss %xmm1, %xmm1, %xmm1
 ; VEX-NEXT:  .LBB41_3:
 ; VEX-NEXT:    vmovq %xmm0, %rax
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB41_4
 ; VEX-NEXT:  # %bb.5:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; VEX-NEXT:    jmp .LBB41_6
 ; VEX-NEXT:  .LBB41_4:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
 ; VEX-NEXT:    vaddss %xmm0, %xmm0, %xmm0
 ; VEX-NEXT:  .LBB41_6:
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
@@ -2249,7 +2249,7 @@ define <4 x float> @uitofp_4i64_to_4f32_
 ; VEX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; VEX-NEXT:    js .LBB41_8
 ; VEX-NEXT:  # %bb.7:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm1
 ; VEX-NEXT:  .LBB41_8:
 ; VEX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; VEX-NEXT:    retq
@@ -2257,22 +2257,22 @@ define <4 x float> @uitofp_4i64_to_4f32_
 ; AVX512F-LABEL: uitofp_4i64_to_4f32_undef:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm1, %xmm1
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm1, %xmm1
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm0
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm0
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm1
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm1
 ; AVX512F-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: uitofp_4i64_to_4f32_undef:
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm1, %xmm1
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm0
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm0
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm1
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm1
 ; AVX512VL-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
 ; AVX512VL-NEXT:    retq
 ;
@@ -2514,14 +2514,14 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; SSE2-NEXT:    testq %rax, %rax
 ; SSE2-NEXT:    js .LBB47_1
 ; SSE2-NEXT:  # %bb.2:
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE2-NEXT:    jmp .LBB47_3
 ; SSE2-NEXT:  .LBB47_1:
 ; SSE2-NEXT:    movq %rax, %rcx
 ; SSE2-NEXT:    shrq %rcx
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE2-NEXT:    addss %xmm2, %xmm2
 ; SSE2-NEXT:  .LBB47_3:
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
@@ -2529,14 +2529,14 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; SSE2-NEXT:    testq %rax, %rax
 ; SSE2-NEXT:    js .LBB47_4
 ; SSE2-NEXT:  # %bb.5:
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm3
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm3
 ; SSE2-NEXT:    jmp .LBB47_6
 ; SSE2-NEXT:  .LBB47_4:
 ; SSE2-NEXT:    movq %rax, %rcx
 ; SSE2-NEXT:    shrq %rcx
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm3
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm3
 ; SSE2-NEXT:    addss %xmm3, %xmm3
 ; SSE2-NEXT:  .LBB47_6:
 ; SSE2-NEXT:    movq %xmm0, %rax
@@ -2544,7 +2544,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; SSE2-NEXT:    js .LBB47_7
 ; SSE2-NEXT:  # %bb.8:
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    jmp .LBB47_9
 ; SSE2-NEXT:  .LBB47_7:
 ; SSE2-NEXT:    movq %rax, %rcx
@@ -2552,7 +2552,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    addss %xmm1, %xmm1
 ; SSE2-NEXT:  .LBB47_9:
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
@@ -2562,7 +2562,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; SSE2-NEXT:    js .LBB47_10
 ; SSE2-NEXT:  # %bb.11:
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    jmp .LBB47_12
 ; SSE2-NEXT:  .LBB47_10:
 ; SSE2-NEXT:    movq %rax, %rcx
@@ -2570,7 +2570,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    addss %xmm0, %xmm0
 ; SSE2-NEXT:  .LBB47_12:
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -2584,14 +2584,14 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; SSE41-NEXT:    testq %rax, %rax
 ; SSE41-NEXT:    js .LBB47_1
 ; SSE41-NEXT:  # %bb.2:
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    jmp .LBB47_3
 ; SSE41-NEXT:  .LBB47_1:
 ; SSE41-NEXT:    movq %rax, %rcx
 ; SSE41-NEXT:    shrq %rcx
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    addss %xmm2, %xmm2
 ; SSE41-NEXT:  .LBB47_3:
 ; SSE41-NEXT:    movq %xmm0, %rax
@@ -2599,7 +2599,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; SSE41-NEXT:    js .LBB47_4
 ; SSE41-NEXT:  # %bb.5:
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    jmp .LBB47_6
 ; SSE41-NEXT:  .LBB47_4:
 ; SSE41-NEXT:    movq %rax, %rcx
@@ -2607,7 +2607,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    addss %xmm0, %xmm0
 ; SSE41-NEXT:  .LBB47_6:
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
@@ -2616,7 +2616,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; SSE41-NEXT:    js .LBB47_7
 ; SSE41-NEXT:  # %bb.8:
 ; SSE41-NEXT:    xorps %xmm2, %xmm2
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    jmp .LBB47_9
 ; SSE41-NEXT:  .LBB47_7:
 ; SSE41-NEXT:    movq %rax, %rcx
@@ -2624,7 +2624,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm2, %xmm2
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    addss %xmm2, %xmm2
 ; SSE41-NEXT:  .LBB47_9:
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
@@ -2633,7 +2633,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; SSE41-NEXT:    js .LBB47_10
 ; SSE41-NEXT:  # %bb.11:
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; SSE41-NEXT:    retq
 ; SSE41-NEXT:  .LBB47_10:
@@ -2642,7 +2642,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    addss %xmm1, %xmm1
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; SSE41-NEXT:    retq
@@ -2653,28 +2653,28 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; AVX1-NEXT:    testq %rax, %rax
 ; AVX1-NEXT:    js .LBB47_1
 ; AVX1-NEXT:  # %bb.2:
-; AVX1-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX1-NEXT:    jmp .LBB47_3
 ; AVX1-NEXT:  .LBB47_1:
 ; AVX1-NEXT:    movq %rax, %rcx
 ; AVX1-NEXT:    shrq %rcx
 ; AVX1-NEXT:    andl $1, %eax
 ; AVX1-NEXT:    orq %rcx, %rax
-; AVX1-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX1-NEXT:    vaddss %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:  .LBB47_3:
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    testq %rax, %rax
 ; AVX1-NEXT:    js .LBB47_4
 ; AVX1-NEXT:  # %bb.5:
-; AVX1-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; AVX1-NEXT:    jmp .LBB47_6
 ; AVX1-NEXT:  .LBB47_4:
 ; AVX1-NEXT:    movq %rax, %rcx
 ; AVX1-NEXT:    shrq %rcx
 ; AVX1-NEXT:    andl $1, %eax
 ; AVX1-NEXT:    orq %rcx, %rax
-; AVX1-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; AVX1-NEXT:    vaddss %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:  .LBB47_6:
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
@@ -2683,14 +2683,14 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; AVX1-NEXT:    testq %rax, %rax
 ; AVX1-NEXT:    js .LBB47_7
 ; AVX1-NEXT:  # %bb.8:
-; AVX1-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; AVX1-NEXT:    jmp .LBB47_9
 ; AVX1-NEXT:  .LBB47_7:
 ; AVX1-NEXT:    movq %rax, %rcx
 ; AVX1-NEXT:    shrq %rcx
 ; AVX1-NEXT:    andl $1, %eax
 ; AVX1-NEXT:    orq %rcx, %rax
-; AVX1-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; AVX1-NEXT:    vaddss %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:  .LBB47_9:
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
@@ -2698,7 +2698,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; AVX1-NEXT:    testq %rax, %rax
 ; AVX1-NEXT:    js .LBB47_10
 ; AVX1-NEXT:  # %bb.11:
-; AVX1-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -2707,7 +2707,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; AVX1-NEXT:    shrq %rcx
 ; AVX1-NEXT:    andl $1, %eax
 ; AVX1-NEXT:    orq %rcx, %rax
-; AVX1-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; AVX1-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; AVX1-NEXT:    vaddss %xmm0, %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; AVX1-NEXT:    vzeroupper
@@ -2719,28 +2719,28 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; AVX2-NEXT:    testq %rax, %rax
 ; AVX2-NEXT:    js .LBB47_1
 ; AVX2-NEXT:  # %bb.2:
-; AVX2-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX2-NEXT:    jmp .LBB47_3
 ; AVX2-NEXT:  .LBB47_1:
 ; AVX2-NEXT:    movq %rax, %rcx
 ; AVX2-NEXT:    shrq %rcx
 ; AVX2-NEXT:    andl $1, %eax
 ; AVX2-NEXT:    orq %rcx, %rax
-; AVX2-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; AVX2-NEXT:    vaddss %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:  .LBB47_3:
 ; AVX2-NEXT:    vmovq %xmm0, %rax
 ; AVX2-NEXT:    testq %rax, %rax
 ; AVX2-NEXT:    js .LBB47_4
 ; AVX2-NEXT:  # %bb.5:
-; AVX2-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; AVX2-NEXT:    jmp .LBB47_6
 ; AVX2-NEXT:  .LBB47_4:
 ; AVX2-NEXT:    movq %rax, %rcx
 ; AVX2-NEXT:    shrq %rcx
 ; AVX2-NEXT:    andl $1, %eax
 ; AVX2-NEXT:    orq %rcx, %rax
-; AVX2-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; AVX2-NEXT:    vaddss %xmm2, %xmm2, %xmm2
 ; AVX2-NEXT:  .LBB47_6:
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
@@ -2749,14 +2749,14 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; AVX2-NEXT:    testq %rax, %rax
 ; AVX2-NEXT:    js .LBB47_7
 ; AVX2-NEXT:  # %bb.8:
-; AVX2-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; AVX2-NEXT:    jmp .LBB47_9
 ; AVX2-NEXT:  .LBB47_7:
 ; AVX2-NEXT:    movq %rax, %rcx
 ; AVX2-NEXT:    shrq %rcx
 ; AVX2-NEXT:    andl $1, %eax
 ; AVX2-NEXT:    orq %rcx, %rax
-; AVX2-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; AVX2-NEXT:    vaddss %xmm2, %xmm2, %xmm2
 ; AVX2-NEXT:  .LBB47_9:
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
@@ -2764,7 +2764,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; AVX2-NEXT:    testq %rax, %rax
 ; AVX2-NEXT:    js .LBB47_10
 ; AVX2-NEXT:  # %bb.11:
-; AVX2-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -2773,7 +2773,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; AVX2-NEXT:    shrq %rcx
 ; AVX2-NEXT:    andl $1, %eax
 ; AVX2-NEXT:    orq %rcx, %rax
-; AVX2-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; AVX2-NEXT:    vaddss %xmm0, %xmm0, %xmm0
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; AVX2-NEXT:    vzeroupper
@@ -2782,16 +2782,16 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; AVX512F-LABEL: uitofp_4i64_to_4f32:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm1, %xmm1
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm1, %xmm1
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm2
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm2
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm2
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm2
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm0
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm0
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -2799,16 +2799,16 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; AVX512VL-LABEL: uitofp_4i64_to_4f32:
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm1, %xmm1
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm2
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm2
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
 ; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm2
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm2
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm0
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm0
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
@@ -3074,11 +3074,11 @@ define <2 x double> @sitofp_load_2i64_to
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm1
 ; SSE2-NEXT:    movq %xmm1, %rax
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm1, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm1
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE2-NEXT:    retq
 ;
@@ -3086,10 +3086,10 @@ define <2 x double> @sitofp_load_2i64_to
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movdqa (%rdi), %xmm0
 ; SSE41-NEXT:    pextrq $1, %xmm0, %rax
-; SSE41-NEXT:    cvtsi2sdq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2sd %rax, %xmm1
 ; SSE41-NEXT:    movq %xmm0, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE41-NEXT:    retq
 ;
@@ -3097,9 +3097,9 @@ define <2 x double> @sitofp_load_2i64_to
 ; VEX:       # %bb.0:
 ; VEX-NEXT:    vmovdqa (%rdi), %xmm0
 ; VEX-NEXT:    vpextrq $1, %xmm0, %rax
-; VEX-NEXT:    vcvtsi2sdq %rax, %xmm1, %xmm1
+; VEX-NEXT:    vcvtsi2sd %rax, %xmm1, %xmm1
 ; VEX-NEXT:    vmovq %xmm0, %rax
-; VEX-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm0
+; VEX-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm0
 ; VEX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; VEX-NEXT:    retq
 ;
@@ -3107,9 +3107,9 @@ define <2 x double> @sitofp_load_2i64_to
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2sdq %rax, %xmm1, %xmm1
+; AVX512F-NEXT:    vcvtsi2sd %rax, %xmm1, %xmm1
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm0
+; AVX512F-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm0
 ; AVX512F-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX512F-NEXT:    retq
 ;
@@ -3117,9 +3117,9 @@ define <2 x double> @sitofp_load_2i64_to
 ; AVX512VL:       # %bb.0:
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2sdq %rax, %xmm1, %xmm1
+; AVX512VL-NEXT:    vcvtsi2sd %rax, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm0
+; AVX512VL-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm0
 ; AVX512VL-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX512VL-NEXT:    retq
 ;
@@ -3217,19 +3217,19 @@ define <4 x double> @sitofp_load_4i64_to
 ; SSE2-NEXT:    movdqa (%rdi), %xmm1
 ; SSE2-NEXT:    movdqa 16(%rdi), %xmm2
 ; SSE2-NEXT:    movq %xmm1, %rax
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm1, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm1
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE2-NEXT:    movq %xmm2, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm1
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm2, %rax
 ; SSE2-NEXT:    xorps %xmm2, %xmm2
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm2
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm2
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; SSE2-NEXT:    retq
 ;
@@ -3238,17 +3238,17 @@ define <4 x double> @sitofp_load_4i64_to
 ; SSE41-NEXT:    movdqa (%rdi), %xmm0
 ; SSE41-NEXT:    movdqa 16(%rdi), %xmm1
 ; SSE41-NEXT:    pextrq $1, %xmm0, %rax
-; SSE41-NEXT:    cvtsi2sdq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2sd %rax, %xmm2
 ; SSE41-NEXT:    movq %xmm0, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; SSE41-NEXT:    pextrq $1, %xmm1, %rax
 ; SSE41-NEXT:    xorps %xmm2, %xmm2
-; SSE41-NEXT:    cvtsi2sdq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2sd %rax, %xmm2
 ; SSE41-NEXT:    movq %xmm1, %rax
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2sdq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2sd %rax, %xmm1
 ; SSE41-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; SSE41-NEXT:    retq
 ;
@@ -3257,14 +3257,14 @@ define <4 x double> @sitofp_load_4i64_to
 ; VEX-NEXT:    vmovdqa (%rdi), %xmm0
 ; VEX-NEXT:    vmovdqa 16(%rdi), %xmm1
 ; VEX-NEXT:    vpextrq $1, %xmm1, %rax
-; VEX-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm2
+; VEX-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm2
 ; VEX-NEXT:    vmovq %xmm1, %rax
-; VEX-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm1
+; VEX-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm1
 ; VEX-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; VEX-NEXT:    vpextrq $1, %xmm0, %rax
-; VEX-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm2
+; VEX-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm2
 ; VEX-NEXT:    vmovq %xmm0, %rax
-; VEX-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm0
+; VEX-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm0
 ; VEX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; VEX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; VEX-NEXT:    retq
@@ -3274,14 +3274,14 @@ define <4 x double> @sitofp_load_4i64_to
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vmovdqa 16(%rdi), %xmm1
 ; AVX512F-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX512F-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm2
+; AVX512F-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm2
 ; AVX512F-NEXT:    vmovq %xmm1, %rax
-; AVX512F-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm1
+; AVX512F-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm1
 ; AVX512F-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm2
+; AVX512F-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm2
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm0
+; AVX512F-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm0
 ; AVX512F-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX512F-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
@@ -3291,14 +3291,14 @@ define <4 x double> @sitofp_load_4i64_to
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512VL-NEXT:    vmovdqa 16(%rdi), %xmm1
 ; AVX512VL-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX512VL-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm2
+; AVX512VL-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm2
 ; AVX512VL-NEXT:    vmovq %xmm1, %rax
-; AVX512VL-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm1
+; AVX512VL-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm1
 ; AVX512VL-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm2
+; AVX512VL-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm2
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm0
+; AVX512VL-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm0
 ; AVX512VL-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX512VL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512VL-NEXT:    retq
@@ -3898,19 +3898,19 @@ define <4 x float> @sitofp_load_4i64_to_
 ; SSE2-NEXT:    movdqa (%rdi), %xmm1
 ; SSE2-NEXT:    movdqa 16(%rdi), %xmm0
 ; SSE2-NEXT:    movq %xmm0, %rax
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm0, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
 ; SSE2-NEXT:    movq %xmm1, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm1, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; SSE2-NEXT:    retq
@@ -3920,18 +3920,18 @@ define <4 x float> @sitofp_load_4i64_to_
 ; SSE41-NEXT:    movdqa (%rdi), %xmm0
 ; SSE41-NEXT:    movdqa 16(%rdi), %xmm1
 ; SSE41-NEXT:    pextrq $1, %xmm0, %rax
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    movq %xmm0, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
 ; SSE41-NEXT:    movq %xmm1, %rax
 ; SSE41-NEXT:    xorps %xmm2, %xmm2
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
 ; SSE41-NEXT:    pextrq $1, %xmm1, %rax
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; SSE41-NEXT:    retq
 ;
@@ -3940,15 +3940,15 @@ define <4 x float> @sitofp_load_4i64_to_
 ; VEX-NEXT:    vmovdqa (%rdi), %xmm0
 ; VEX-NEXT:    vmovdqa 16(%rdi), %xmm1
 ; VEX-NEXT:    vpextrq $1, %xmm0, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; VEX-NEXT:    vmovq %xmm0, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
 ; VEX-NEXT:    vmovq %xmm1, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
 ; VEX-NEXT:    vpextrq $1, %xmm1, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm1
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; VEX-NEXT:    retq
 ;
@@ -3957,15 +3957,15 @@ define <4 x float> @sitofp_load_4i64_to_
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vmovdqa 16(%rdi), %xmm1
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
 ; AVX512F-NEXT:    vmovq %xmm1, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
 ; AVX512F-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm1
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm1
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; AVX512F-NEXT:    retq
 ;
@@ -3974,15 +3974,15 @@ define <4 x float> @sitofp_load_4i64_to_
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512VL-NEXT:    vmovdqa 16(%rdi), %xmm1
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
 ; AVX512VL-NEXT:    vmovq %xmm1, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
 ; AVX512VL-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm1
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm1
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; AVX512VL-NEXT:    retq
 ;
@@ -4077,36 +4077,36 @@ define <8 x float> @sitofp_load_8i64_to_
 ; SSE2-NEXT:    movdqa 32(%rdi), %xmm2
 ; SSE2-NEXT:    movdqa 48(%rdi), %xmm3
 ; SSE2-NEXT:    movq %xmm0, %rax
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm4
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm4
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm0, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
 ; SSE2-NEXT:    movq %xmm1, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm1, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm4[0]
 ; SSE2-NEXT:    movq %xmm3, %rax
 ; SSE2-NEXT:    xorps %xmm4, %xmm4
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm4
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm4
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm1, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
 ; SSE2-NEXT:    movq %xmm2, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
 ; SSE2-NEXT:    movq %xmm2, %rax
 ; SSE2-NEXT:    xorps %xmm2, %xmm2
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm4[0]
 ; SSE2-NEXT:    retq
@@ -4118,33 +4118,33 @@ define <8 x float> @sitofp_load_8i64_to_
 ; SSE41-NEXT:    movdqa 32(%rdi), %xmm2
 ; SSE41-NEXT:    movdqa 48(%rdi), %xmm3
 ; SSE41-NEXT:    pextrq $1, %xmm0, %rax
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm4
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm4
 ; SSE41-NEXT:    movq %xmm0, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[2,3]
 ; SSE41-NEXT:    movq %xmm1, %rax
 ; SSE41-NEXT:    xorps %xmm4, %xmm4
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm4
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm4
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],xmm4[0],xmm0[3]
 ; SSE41-NEXT:    pextrq $1, %xmm1, %rax
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; SSE41-NEXT:    pextrq $1, %xmm2, %rax
 ; SSE41-NEXT:    xorps %xmm4, %xmm4
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm4
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm4
 ; SSE41-NEXT:    movq %xmm2, %rax
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[2,3]
 ; SSE41-NEXT:    movq %xmm3, %rax
 ; SSE41-NEXT:    xorps %xmm2, %xmm2
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; SSE41-NEXT:    pextrq $1, %xmm3, %rax
 ; SSE41-NEXT:    xorps %xmm2, %xmm2
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
 ; SSE41-NEXT:    retq
 ;
@@ -4155,26 +4155,26 @@ define <8 x float> @sitofp_load_8i64_to_
 ; VEX-NEXT:    vmovdqa 32(%rdi), %xmm2
 ; VEX-NEXT:    vmovdqa 48(%rdi), %xmm3
 ; VEX-NEXT:    vpextrq $1, %xmm2, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm4, %xmm4
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm4, %xmm4
 ; VEX-NEXT:    vmovq %xmm2, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm2
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm2
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[2,3]
 ; VEX-NEXT:    vmovq %xmm3, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm4
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm4
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm4[0],xmm2[3]
 ; VEX-NEXT:    vpextrq $1, %xmm3, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
 ; VEX-NEXT:    vpextrq $1, %xmm0, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; VEX-NEXT:    vmovq %xmm0, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm0
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
 ; VEX-NEXT:    vmovq %xmm1, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm3[0],xmm0[3]
 ; VEX-NEXT:    vpextrq $1, %xmm1, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm1
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; VEX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; VEX-NEXT:    retq
@@ -4186,26 +4186,26 @@ define <8 x float> @sitofp_load_8i64_to_
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %xmm2
 ; AVX512F-NEXT:    vmovdqa 48(%rdi), %xmm3
 ; AVX512F-NEXT:    vpextrq $1, %xmm2, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm4, %xmm4
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm4, %xmm4
 ; AVX512F-NEXT:    vmovq %xmm2, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm2
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm2
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[2,3]
 ; AVX512F-NEXT:    vmovq %xmm3, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm4
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm4
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm4[0],xmm2[3]
 ; AVX512F-NEXT:    vpextrq $1, %xmm3, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm0
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm0
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
 ; AVX512F-NEXT:    vmovq %xmm1, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm3[0],xmm0[3]
 ; AVX512F-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX512F-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm1
+; AVX512F-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm1
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; AVX512F-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
@@ -4217,26 +4217,26 @@ define <8 x float> @sitofp_load_8i64_to_
 ; AVX512VL-NEXT:    vmovdqa 32(%rdi), %xmm2
 ; AVX512VL-NEXT:    vmovdqa 48(%rdi), %xmm3
 ; AVX512VL-NEXT:    vpextrq $1, %xmm2, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm4, %xmm4
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm4, %xmm4
 ; AVX512VL-NEXT:    vmovq %xmm2, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm2
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm2
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[2,3]
 ; AVX512VL-NEXT:    vmovq %xmm3, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm4
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm4
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm4[0],xmm2[3]
 ; AVX512VL-NEXT:    vpextrq $1, %xmm3, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm0
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm0
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
 ; AVX512VL-NEXT:    vmovq %xmm1, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm3
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm3
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm3[0],xmm0[3]
 ; AVX512VL-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX512VL-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm1
+; AVX512VL-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm1
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; AVX512VL-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX512VL-NEXT:    retq
@@ -4373,14 +4373,14 @@ define <4 x float> @uitofp_load_4i64_to_
 ; SSE2-NEXT:    testq %rax, %rax
 ; SSE2-NEXT:    js .LBB76_1
 ; SSE2-NEXT:  # %bb.2:
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    jmp .LBB76_3
 ; SSE2-NEXT:  .LBB76_1:
 ; SSE2-NEXT:    movq %rax, %rcx
 ; SSE2-NEXT:    shrq %rcx
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    addss %xmm1, %xmm1
 ; SSE2-NEXT:  .LBB76_3:
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -4388,14 +4388,14 @@ define <4 x float> @uitofp_load_4i64_to_
 ; SSE2-NEXT:    testq %rax, %rax
 ; SSE2-NEXT:    js .LBB76_4
 ; SSE2-NEXT:  # %bb.5:
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm3
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm3
 ; SSE2-NEXT:    jmp .LBB76_6
 ; SSE2-NEXT:  .LBB76_4:
 ; SSE2-NEXT:    movq %rax, %rcx
 ; SSE2-NEXT:    shrq %rcx
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm3
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm3
 ; SSE2-NEXT:    addss %xmm3, %xmm3
 ; SSE2-NEXT:  .LBB76_6:
 ; SSE2-NEXT:    movq %xmm2, %rax
@@ -4403,7 +4403,7 @@ define <4 x float> @uitofp_load_4i64_to_
 ; SSE2-NEXT:    js .LBB76_7
 ; SSE2-NEXT:  # %bb.8:
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    jmp .LBB76_9
 ; SSE2-NEXT:  .LBB76_7:
 ; SSE2-NEXT:    movq %rax, %rcx
@@ -4411,7 +4411,7 @@ define <4 x float> @uitofp_load_4i64_to_
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    addss %xmm0, %xmm0
 ; SSE2-NEXT:  .LBB76_9:
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
@@ -4421,7 +4421,7 @@ define <4 x float> @uitofp_load_4i64_to_
 ; SSE2-NEXT:    js .LBB76_10
 ; SSE2-NEXT:  # %bb.11:
 ; SSE2-NEXT:    xorps %xmm2, %xmm2
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE2-NEXT:    jmp .LBB76_12
 ; SSE2-NEXT:  .LBB76_10:
 ; SSE2-NEXT:    movq %rax, %rcx
@@ -4429,7 +4429,7 @@ define <4 x float> @uitofp_load_4i64_to_
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm2, %xmm2
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE2-NEXT:    addss %xmm2, %xmm2
 ; SSE2-NEXT:  .LBB76_12:
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
@@ -4444,14 +4444,14 @@ define <4 x float> @uitofp_load_4i64_to_
 ; SSE41-NEXT:    testq %rax, %rax
 ; SSE41-NEXT:    js .LBB76_1
 ; SSE41-NEXT:  # %bb.2:
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    jmp .LBB76_3
 ; SSE41-NEXT:  .LBB76_1:
 ; SSE41-NEXT:    movq %rax, %rcx
 ; SSE41-NEXT:    shrq %rcx
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    addss %xmm2, %xmm2
 ; SSE41-NEXT:  .LBB76_3:
 ; SSE41-NEXT:    movq %xmm0, %rax
@@ -4459,7 +4459,7 @@ define <4 x float> @uitofp_load_4i64_to_
 ; SSE41-NEXT:    js .LBB76_4
 ; SSE41-NEXT:  # %bb.5:
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    jmp .LBB76_6
 ; SSE41-NEXT:  .LBB76_4:
 ; SSE41-NEXT:    movq %rax, %rcx
@@ -4467,7 +4467,7 @@ define <4 x float> @uitofp_load_4i64_to_
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    addss %xmm0, %xmm0
 ; SSE41-NEXT:  .LBB76_6:
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
@@ -4476,7 +4476,7 @@ define <4 x float> @uitofp_load_4i64_to_
 ; SSE41-NEXT:    js .LBB76_7
 ; SSE41-NEXT:  # %bb.8:
 ; SSE41-NEXT:    xorps %xmm2, %xmm2
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    jmp .LBB76_9
 ; SSE41-NEXT:  .LBB76_7:
 ; SSE41-NEXT:    movq %rax, %rcx
@@ -4484,7 +4484,7 @@ define <4 x float> @uitofp_load_4i64_to_
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm2, %xmm2
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    addss %xmm2, %xmm2
 ; SSE41-NEXT:  .LBB76_9:
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
@@ -4493,7 +4493,7 @@ define <4 x float> @uitofp_load_4i64_to_
 ; SSE41-NEXT:    js .LBB76_10
 ; SSE41-NEXT:  # %bb.11:
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; SSE41-NEXT:    retq
 ; SSE41-NEXT:  .LBB76_10:
@@ -4502,7 +4502,7 @@ define <4 x float> @uitofp_load_4i64_to_
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    addss %xmm1, %xmm1
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; SSE41-NEXT:    retq
@@ -4515,28 +4515,28 @@ define <4 x float> @uitofp_load_4i64_to_
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB76_1
 ; VEX-NEXT:  # %bb.2:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; VEX-NEXT:    jmp .LBB76_3
 ; VEX-NEXT:  .LBB76_1:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
 ; VEX-NEXT:    vaddss %xmm1, %xmm1, %xmm1
 ; VEX-NEXT:  .LBB76_3:
 ; VEX-NEXT:    vmovq %xmm2, %rax
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB76_4
 ; VEX-NEXT:  # %bb.5:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; VEX-NEXT:    jmp .LBB76_6
 ; VEX-NEXT:  .LBB76_4:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; VEX-NEXT:    vaddss %xmm2, %xmm2, %xmm2
 ; VEX-NEXT:  .LBB76_6:
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
@@ -4544,14 +4544,14 @@ define <4 x float> @uitofp_load_4i64_to_
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB76_7
 ; VEX-NEXT:  # %bb.8:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; VEX-NEXT:    jmp .LBB76_9
 ; VEX-NEXT:  .LBB76_7:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm2
 ; VEX-NEXT:    vaddss %xmm2, %xmm2, %xmm2
 ; VEX-NEXT:  .LBB76_9:
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
@@ -4559,7 +4559,7 @@ define <4 x float> @uitofp_load_4i64_to_
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB76_10
 ; VEX-NEXT:  # %bb.11:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; VEX-NEXT:    retq
 ; VEX-NEXT:  .LBB76_10:
@@ -4567,7 +4567,7 @@ define <4 x float> @uitofp_load_4i64_to_
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm0
 ; VEX-NEXT:    vaddss %xmm0, %xmm0, %xmm0
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; VEX-NEXT:    retq
@@ -4577,15 +4577,15 @@ define <4 x float> @uitofp_load_4i64_to_
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vmovdqa 16(%rdi), %xmm1
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm2
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm2
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm0
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm0
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
 ; AVX512F-NEXT:    vmovq %xmm1, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm2
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm2
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
 ; AVX512F-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm1
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm1
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; AVX512F-NEXT:    retq
 ;
@@ -4594,15 +4594,15 @@ define <4 x float> @uitofp_load_4i64_to_
 ; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512VL-NEXT:    vmovdqa 16(%rdi), %xmm1
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm2
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm2
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm0
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm0
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
 ; AVX512VL-NEXT:    vmovq %xmm1, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm2
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm2
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
 ; AVX512VL-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm1
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm3, %xmm1
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; AVX512VL-NEXT:    retq
 ;
@@ -4762,14 +4762,14 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE2-NEXT:    testq %rax, %rax
 ; SSE2-NEXT:    js .LBB80_1
 ; SSE2-NEXT:  # %bb.2:
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm3
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm3
 ; SSE2-NEXT:    jmp .LBB80_3
 ; SSE2-NEXT:  .LBB80_1:
 ; SSE2-NEXT:    movq %rax, %rcx
 ; SSE2-NEXT:    shrq %rcx
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm3
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm3
 ; SSE2-NEXT:    addss %xmm3, %xmm3
 ; SSE2-NEXT:  .LBB80_3:
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -4777,14 +4777,14 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE2-NEXT:    testq %rax, %rax
 ; SSE2-NEXT:    js .LBB80_4
 ; SSE2-NEXT:  # %bb.5:
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm4
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm4
 ; SSE2-NEXT:    jmp .LBB80_6
 ; SSE2-NEXT:  .LBB80_4:
 ; SSE2-NEXT:    movq %rax, %rcx
 ; SSE2-NEXT:    shrq %rcx
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm4
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm4
 ; SSE2-NEXT:    addss %xmm4, %xmm4
 ; SSE2-NEXT:  .LBB80_6:
 ; SSE2-NEXT:    movq %xmm5, %rax
@@ -4792,7 +4792,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE2-NEXT:    js .LBB80_7
 ; SSE2-NEXT:  # %bb.8:
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    jmp .LBB80_9
 ; SSE2-NEXT:  .LBB80_7:
 ; SSE2-NEXT:    movq %rax, %rcx
@@ -4800,7 +4800,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    addss %xmm0, %xmm0
 ; SSE2-NEXT:  .LBB80_9:
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[2,3,0,1]
@@ -4808,14 +4808,14 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE2-NEXT:    testq %rax, %rax
 ; SSE2-NEXT:    js .LBB80_10
 ; SSE2-NEXT:  # %bb.11:
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm6
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm6
 ; SSE2-NEXT:    jmp .LBB80_12
 ; SSE2-NEXT:  .LBB80_10:
 ; SSE2-NEXT:    movq %rax, %rcx
 ; SSE2-NEXT:    shrq %rcx
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm6
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm6
 ; SSE2-NEXT:    addss %xmm6, %xmm6
 ; SSE2-NEXT:  .LBB80_12:
 ; SSE2-NEXT:    movq %xmm1, %rax
@@ -4823,7 +4823,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE2-NEXT:    js .LBB80_13
 ; SSE2-NEXT:  # %bb.14:
 ; SSE2-NEXT:    xorps %xmm5, %xmm5
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm5
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm5
 ; SSE2-NEXT:    jmp .LBB80_15
 ; SSE2-NEXT:  .LBB80_13:
 ; SSE2-NEXT:    movq %rax, %rcx
@@ -4831,7 +4831,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm5, %xmm5
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm5
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm5
 ; SSE2-NEXT:    addss %xmm5, %xmm5
 ; SSE2-NEXT:  .LBB80_15:
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
@@ -4839,14 +4839,14 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE2-NEXT:    testq %rax, %rax
 ; SSE2-NEXT:    js .LBB80_16
 ; SSE2-NEXT:  # %bb.17:
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm7
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm7
 ; SSE2-NEXT:    jmp .LBB80_18
 ; SSE2-NEXT:  .LBB80_16:
 ; SSE2-NEXT:    movq %rax, %rcx
 ; SSE2-NEXT:    shrq %rcx
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm7
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm7
 ; SSE2-NEXT:    addss %xmm7, %xmm7
 ; SSE2-NEXT:  .LBB80_18:
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
@@ -4856,7 +4856,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE2-NEXT:    js .LBB80_19
 ; SSE2-NEXT:  # %bb.20:
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    jmp .LBB80_21
 ; SSE2-NEXT:  .LBB80_19:
 ; SSE2-NEXT:    movq %rax, %rcx
@@ -4864,7 +4864,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm1, %xmm1
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE2-NEXT:    addss %xmm1, %xmm1
 ; SSE2-NEXT:  .LBB80_21:
 ; SSE2-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0]
@@ -4875,7 +4875,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE2-NEXT:    js .LBB80_22
 ; SSE2-NEXT:  # %bb.23:
 ; SSE2-NEXT:    xorps %xmm2, %xmm2
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE2-NEXT:    jmp .LBB80_24
 ; SSE2-NEXT:  .LBB80_22:
 ; SSE2-NEXT:    movq %rax, %rcx
@@ -4883,7 +4883,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE2-NEXT:    andl $1, %eax
 ; SSE2-NEXT:    orq %rcx, %rax
 ; SSE2-NEXT:    xorps %xmm2, %xmm2
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE2-NEXT:    addss %xmm2, %xmm2
 ; SSE2-NEXT:  .LBB80_24:
 ; SSE2-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
@@ -4900,14 +4900,14 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE41-NEXT:    testq %rax, %rax
 ; SSE41-NEXT:    js .LBB80_1
 ; SSE41-NEXT:  # %bb.2:
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm3
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm3
 ; SSE41-NEXT:    jmp .LBB80_3
 ; SSE41-NEXT:  .LBB80_1:
 ; SSE41-NEXT:    movq %rax, %rcx
 ; SSE41-NEXT:    shrq %rcx
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm3
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm3
 ; SSE41-NEXT:    addss %xmm3, %xmm3
 ; SSE41-NEXT:  .LBB80_3:
 ; SSE41-NEXT:    movq %xmm0, %rax
@@ -4915,7 +4915,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE41-NEXT:    js .LBB80_4
 ; SSE41-NEXT:  # %bb.5:
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    jmp .LBB80_6
 ; SSE41-NEXT:  .LBB80_4:
 ; SSE41-NEXT:    movq %rax, %rcx
@@ -4923,21 +4923,21 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    addss %xmm0, %xmm0
 ; SSE41-NEXT:  .LBB80_6:
 ; SSE41-NEXT:    movq %xmm4, %rax
 ; SSE41-NEXT:    testq %rax, %rax
 ; SSE41-NEXT:    js .LBB80_7
 ; SSE41-NEXT:  # %bb.8:
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm5
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm5
 ; SSE41-NEXT:    jmp .LBB80_9
 ; SSE41-NEXT:  .LBB80_7:
 ; SSE41-NEXT:    movq %rax, %rcx
 ; SSE41-NEXT:    shrq %rcx
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm5
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm5
 ; SSE41-NEXT:    addss %xmm5, %xmm5
 ; SSE41-NEXT:  .LBB80_9:
 ; SSE41-NEXT:    pextrq $1, %xmm4, %rax
@@ -4945,7 +4945,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE41-NEXT:    js .LBB80_10
 ; SSE41-NEXT:  # %bb.11:
 ; SSE41-NEXT:    xorps %xmm4, %xmm4
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm4
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm4
 ; SSE41-NEXT:    jmp .LBB80_12
 ; SSE41-NEXT:  .LBB80_10:
 ; SSE41-NEXT:    movq %rax, %rcx
@@ -4953,21 +4953,21 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm4, %xmm4
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm4
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm4
 ; SSE41-NEXT:    addss %xmm4, %xmm4
 ; SSE41-NEXT:  .LBB80_12:
 ; SSE41-NEXT:    pextrq $1, %xmm1, %rax
 ; SSE41-NEXT:    testq %rax, %rax
 ; SSE41-NEXT:    js .LBB80_13
 ; SSE41-NEXT:  # %bb.14:
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm6
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm6
 ; SSE41-NEXT:    jmp .LBB80_15
 ; SSE41-NEXT:  .LBB80_13:
 ; SSE41-NEXT:    movq %rax, %rcx
 ; SSE41-NEXT:    shrq %rcx
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm6
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm6
 ; SSE41-NEXT:    addss %xmm6, %xmm6
 ; SSE41-NEXT:  .LBB80_15:
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
@@ -4976,7 +4976,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE41-NEXT:    js .LBB80_16
 ; SSE41-NEXT:  # %bb.17:
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    jmp .LBB80_18
 ; SSE41-NEXT:  .LBB80_16:
 ; SSE41-NEXT:    movq %rax, %rcx
@@ -4984,7 +4984,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm1
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm1
 ; SSE41-NEXT:    addss %xmm1, %xmm1
 ; SSE41-NEXT:  .LBB80_18:
 ; SSE41-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[2,3]
@@ -4994,7 +4994,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE41-NEXT:    js .LBB80_19
 ; SSE41-NEXT:  # %bb.20:
 ; SSE41-NEXT:    xorps %xmm3, %xmm3
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm3
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm3
 ; SSE41-NEXT:    jmp .LBB80_21
 ; SSE41-NEXT:  .LBB80_19:
 ; SSE41-NEXT:    movq %rax, %rcx
@@ -5002,7 +5002,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm3, %xmm3
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm3
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm3
 ; SSE41-NEXT:    addss %xmm3, %xmm3
 ; SSE41-NEXT:  .LBB80_21:
 ; SSE41-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
@@ -5012,7 +5012,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE41-NEXT:    js .LBB80_22
 ; SSE41-NEXT:  # %bb.23:
 ; SSE41-NEXT:    xorps %xmm2, %xmm2
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
 ; SSE41-NEXT:    retq
 ; SSE41-NEXT:  .LBB80_22:
@@ -5021,7 +5021,7 @@ define <8 x float> @uitofp_load_8i64_to_
 ; SSE41-NEXT:    andl $1, %eax
 ; SSE41-NEXT:    orq %rcx, %rax
 ; SSE41-NEXT:    xorps %xmm2, %xmm2
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm2
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm2
 ; SSE41-NEXT:    addss %xmm2, %xmm2
 ; SSE41-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
 ; SSE41-NEXT:    retq
@@ -5036,70 +5036,70 @@ define <8 x float> @uitofp_load_8i64_to_
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB80_1
 ; VEX-NEXT:  # %bb.2:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; VEX-NEXT:    jmp .LBB80_3
 ; VEX-NEXT:  .LBB80_1:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
 ; VEX-NEXT:    vaddss %xmm2, %xmm2, %xmm2
 ; VEX-NEXT:  .LBB80_3:
 ; VEX-NEXT:    vmovq %xmm4, %rax
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB80_4
 ; VEX-NEXT:  # %bb.5:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm5
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm5
 ; VEX-NEXT:    jmp .LBB80_6
 ; VEX-NEXT:  .LBB80_4:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm5, %xmm4
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm5, %xmm4
 ; VEX-NEXT:    vaddss %xmm4, %xmm4, %xmm5
 ; VEX-NEXT:  .LBB80_6:
 ; VEX-NEXT:    vmovq %xmm3, %rax
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB80_7
 ; VEX-NEXT:  # %bb.8:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm6, %xmm4
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm6, %xmm4
 ; VEX-NEXT:    jmp .LBB80_9
 ; VEX-NEXT:  .LBB80_7:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm6, %xmm4
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm6, %xmm4
 ; VEX-NEXT:    vaddss %xmm4, %xmm4, %xmm4
 ; VEX-NEXT:  .LBB80_9:
 ; VEX-NEXT:    vpextrq $1, %xmm3, %rax
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB80_10
 ; VEX-NEXT:  # %bb.11:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm6, %xmm3
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm6, %xmm3
 ; VEX-NEXT:    jmp .LBB80_12
 ; VEX-NEXT:  .LBB80_10:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm6, %xmm3
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm6, %xmm3
 ; VEX-NEXT:    vaddss %xmm3, %xmm3, %xmm3
 ; VEX-NEXT:  .LBB80_12:
 ; VEX-NEXT:    vpextrq $1, %xmm1, %rax
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB80_13
 ; VEX-NEXT:  # %bb.14:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm6, %xmm6
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm6, %xmm6
 ; VEX-NEXT:    jmp .LBB80_15
 ; VEX-NEXT:  .LBB80_13:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm6, %xmm6
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm6, %xmm6
 ; VEX-NEXT:    vaddss %xmm6, %xmm6, %xmm6
 ; VEX-NEXT:  .LBB80_15:
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm2 = xmm5[0],xmm2[0],xmm5[2,3]
@@ -5107,14 +5107,14 @@ define <8 x float> @uitofp_load_8i64_to_
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB80_16
 ; VEX-NEXT:  # %bb.17:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm7, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm7, %xmm1
 ; VEX-NEXT:    jmp .LBB80_18
 ; VEX-NEXT:  .LBB80_16:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm7, %xmm1
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm7, %xmm1
 ; VEX-NEXT:    vaddss %xmm1, %xmm1, %xmm1
 ; VEX-NEXT:  .LBB80_18:
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm5 = xmm1[0],xmm6[0],xmm1[2,3]
@@ -5123,14 +5123,14 @@ define <8 x float> @uitofp_load_8i64_to_
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB80_19
 ; VEX-NEXT:  # %bb.20:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm7, %xmm2
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm7, %xmm2
 ; VEX-NEXT:    jmp .LBB80_21
 ; VEX-NEXT:  .LBB80_19:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm7, %xmm2
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm7, %xmm2
 ; VEX-NEXT:    vaddss %xmm2, %xmm2, %xmm2
 ; VEX-NEXT:  .LBB80_21:
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm2 = xmm5[0,1],xmm2[0],xmm5[3]
@@ -5139,14 +5139,14 @@ define <8 x float> @uitofp_load_8i64_to_
 ; VEX-NEXT:    testq %rax, %rax
 ; VEX-NEXT:    js .LBB80_22
 ; VEX-NEXT:  # %bb.23:
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm7, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm7, %xmm0
 ; VEX-NEXT:    jmp .LBB80_24
 ; VEX-NEXT:  .LBB80_22:
 ; VEX-NEXT:    movq %rax, %rcx
 ; VEX-NEXT:    shrq %rcx
 ; VEX-NEXT:    andl $1, %eax
 ; VEX-NEXT:    orq %rcx, %rax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm7, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm7, %xmm0
 ; VEX-NEXT:    vaddss %xmm0, %xmm0, %xmm0
 ; VEX-NEXT:  .LBB80_24:
 ; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
@@ -5160,26 +5160,26 @@ define <8 x float> @uitofp_load_8i64_to_
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %xmm2
 ; AVX512F-NEXT:    vmovdqa 48(%rdi), %xmm3
 ; AVX512F-NEXT:    vpextrq $1, %xmm2, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm4, %xmm4
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm4, %xmm4
 ; AVX512F-NEXT:    vmovq %xmm2, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm2
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm2
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[2,3]
 ; AVX512F-NEXT:    vmovq %xmm3, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm4
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm4
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm4[0],xmm2[3]
 ; AVX512F-NEXT:    vpextrq $1, %xmm3, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm3
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm3
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
 ; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm3
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm3
 ; AVX512F-NEXT:    vmovq %xmm0, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm0
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm0
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
 ; AVX512F-NEXT:    vmovq %xmm1, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm3
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm3
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm3[0],xmm0[3]
 ; AVX512F-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX512F-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm1
+; AVX512F-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm1
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; AVX512F-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
@@ -5191,26 +5191,26 @@ define <8 x float> @uitofp_load_8i64_to_
 ; AVX512VL-NEXT:    vmovdqa 32(%rdi), %xmm2
 ; AVX512VL-NEXT:    vmovdqa 48(%rdi), %xmm3
 ; AVX512VL-NEXT:    vpextrq $1, %xmm2, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm4, %xmm4
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm4, %xmm4
 ; AVX512VL-NEXT:    vmovq %xmm2, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm2
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm2
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[2,3]
 ; AVX512VL-NEXT:    vmovq %xmm3, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm4
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm4
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm4[0],xmm2[3]
 ; AVX512VL-NEXT:    vpextrq $1, %xmm3, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm3
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm3
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
 ; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm3
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm3
 ; AVX512VL-NEXT:    vmovq %xmm0, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm0
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm0
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
 ; AVX512VL-NEXT:    vmovq %xmm1, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm3
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm3
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm3[0],xmm0[3]
 ; AVX512VL-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX512VL-NEXT:    vcvtusi2ssq %rax, %xmm5, %xmm1
+; AVX512VL-NEXT:    vcvtusi2ss %rax, %xmm5, %xmm1
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; AVX512VL-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX512VL-NEXT:    retq
@@ -5490,12 +5490,12 @@ define void @aggregate_sitofp_8i16_to_8f
 define <2 x double> @sitofp_i32_to_2f64(<2 x double> %a0, i32 %a1) nounwind {
 ; SSE-LABEL: sitofp_i32_to_2f64:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    cvtsi2sdl %edi, %xmm0
+; SSE-NEXT:    cvtsi2sd %edi, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sitofp_i32_to_2f64:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vcvtsi2sdl %edi, %xmm0, %xmm0
+; AVX-NEXT:    vcvtsi2sd %edi, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %cvt = sitofp i32 %a1 to double
   %res = insertelement <2 x double> %a0, double %cvt, i32 0
@@ -5505,12 +5505,12 @@ define <2 x double> @sitofp_i32_to_2f64(
 define <4 x float> @sitofp_i32_to_4f32(<4 x float> %a0, i32 %a1) nounwind {
 ; SSE-LABEL: sitofp_i32_to_4f32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    cvtsi2ssl %edi, %xmm0
+; SSE-NEXT:    cvtsi2ss %edi, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sitofp_i32_to_4f32:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vcvtsi2ssl %edi, %xmm0, %xmm0
+; AVX-NEXT:    vcvtsi2ss %edi, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %cvt = sitofp i32 %a1 to float
   %res = insertelement <4 x float> %a0, float %cvt, i32 0
@@ -5520,12 +5520,12 @@ define <4 x float> @sitofp_i32_to_4f32(<
 define <2 x double> @sitofp_i64_to_2f64(<2 x double> %a0, i64 %a1) nounwind {
 ; SSE-LABEL: sitofp_i64_to_2f64:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    cvtsi2sdq %rdi, %xmm0
+; SSE-NEXT:    cvtsi2sd %rdi, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sitofp_i64_to_2f64:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0
+; AVX-NEXT:    vcvtsi2sd %rdi, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %cvt = sitofp i64 %a1 to double
   %res = insertelement <2 x double> %a0, double %cvt, i32 0
@@ -5535,12 +5535,12 @@ define <2 x double> @sitofp_i64_to_2f64(
 define <4 x float> @sitofp_i64_to_4f32(<4 x float> %a0, i64 %a1) nounwind {
 ; SSE-LABEL: sitofp_i64_to_4f32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    cvtsi2ssq %rdi, %xmm0
+; SSE-NEXT:    cvtsi2ss %rdi, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sitofp_i64_to_4f32:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0
+; AVX-NEXT:    vcvtsi2ss %rdi, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %cvt = sitofp i64 %a1 to float
   %res = insertelement <4 x float> %a0, float %cvt, i32 0
@@ -5570,7 +5570,7 @@ define float @extract0_sitofp_v4i32_f32i
 ; SSE-NEXT:    movd %xmm0, %eax
 ; SSE-NEXT:    cvtdq2ps %xmm0, %xmm0
 ; SSE-NEXT:    incl %eax
-; SSE-NEXT:    cvtsi2ssl %eax, %xmm1
+; SSE-NEXT:    cvtsi2ss %eax, %xmm1
 ; SSE-NEXT:    divss %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
@@ -5579,7 +5579,7 @@ define float @extract0_sitofp_v4i32_f32i
 ; AVX-NEXT:    vmovd %xmm0, %eax
 ; AVX-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; AVX-NEXT:    incl %eax
-; AVX-NEXT:    vcvtsi2ssl %eax, %xmm1, %xmm1
+; AVX-NEXT:    vcvtsi2ss %eax, %xmm1, %xmm1
 ; AVX-NEXT:    vdivss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %e = extractelement <4 x i32> %x, i32 0
@@ -5615,7 +5615,7 @@ define double @extract0_sitofp_v4i32_f64
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    movd %xmm0, %eax
 ; SSE-NEXT:    xorps %xmm0, %xmm0
-; SSE-NEXT:    cvtsi2sdl %eax, %xmm0
+; SSE-NEXT:    cvtsi2sd %eax, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: extract0_sitofp_v4i32_f64:
@@ -5632,13 +5632,13 @@ define float @extract0_uitofp_v4i32_f32(
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    movd %xmm0, %eax
 ; SSE-NEXT:    xorps %xmm0, %xmm0
-; SSE-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; VEX-LABEL: extract0_uitofp_v4i32_f32:
 ; VEX:       # %bb.0:
 ; VEX-NEXT:    vmovd %xmm0, %eax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm0
 ; VEX-NEXT:    retq
 ;
 ; AVX512F-LABEL: extract0_uitofp_v4i32_f32:
@@ -5676,13 +5676,13 @@ define double @extract0_uitofp_v4i32_f64
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    movd %xmm0, %eax
 ; SSE-NEXT:    xorps %xmm0, %xmm0
-; SSE-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; VEX-LABEL: extract0_uitofp_v4i32_f64:
 ; VEX:       # %bb.0:
 ; VEX-NEXT:    vmovd %xmm0, %eax
-; VEX-NEXT:    vcvtsi2sdq %rax, %xmm1, %xmm0
+; VEX-NEXT:    vcvtsi2sd %rax, %xmm1, %xmm0
 ; VEX-NEXT:    retq
 ;
 ; AVX512F-LABEL: extract0_uitofp_v4i32_f64:
@@ -5740,14 +5740,14 @@ define double @extract3_sitofp_v4i32_f64
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
 ; SSE2-NEXT:    movd %xmm0, %eax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2sdl %eax, %xmm0
+; SSE2-NEXT:    cvtsi2sd %eax, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: extract3_sitofp_v4i32_f64:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    extractps $3, %xmm0, %eax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2sdl %eax, %xmm0
+; SSE41-NEXT:    cvtsi2sd %eax, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: extract3_sitofp_v4i32_f64:
@@ -5766,20 +5766,20 @@ define float @extract3_uitofp_v4i32_f32(
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
 ; SSE2-NEXT:    movd %xmm0, %eax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: extract3_uitofp_v4i32_f32:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    extractps $3, %xmm0, %eax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; VEX-LABEL: extract3_uitofp_v4i32_f32:
 ; VEX:       # %bb.0:
 ; VEX-NEXT:    vextractps $3, %xmm0, %eax
-; VEX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm0
+; VEX-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm0
 ; VEX-NEXT:    retq
 ;
 ; AVX512F-LABEL: extract3_uitofp_v4i32_f32:
@@ -5820,20 +5820,20 @@ define double @extract3_uitofp_v4i32_f64
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
 ; SSE2-NEXT:    movd %xmm0, %eax
 ; SSE2-NEXT:    xorps %xmm0, %xmm0
-; SSE2-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: extract3_uitofp_v4i32_f64:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    extractps $3, %xmm0, %eax
 ; SSE41-NEXT:    xorps %xmm0, %xmm0
-; SSE41-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE41-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; VEX-LABEL: extract3_uitofp_v4i32_f64:
 ; VEX:       # %bb.0:
 ; VEX-NEXT:    vextractps $3, %xmm0, %eax
-; VEX-NEXT:    vcvtsi2sdq %rax, %xmm1, %xmm0
+; VEX-NEXT:    vcvtsi2sd %rax, %xmm1, %xmm0
 ; VEX-NEXT:    retq
 ;
 ; AVX512F-LABEL: extract3_uitofp_v4i32_f64:

Modified: llvm/trunk/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll (original)
+++ llvm/trunk/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll Mon May  6 14:39:51 2019
@@ -38,7 +38,7 @@ define x86_64_sysvcc float @foo(i32 %a0,
 ; CHECK-NEXT:  addl  %edx, %eax
 ; CHECK-NEXT:  addl  %ecx, %eax
 ; CHECK-NEXT:  xorps %xmm0, %xmm0
-; CHECK-NEXT:  cvtsi2ssl %eax, %xmm0
+; CHECK-NEXT:  cvtsi2ss %eax, %xmm0
 ; CHECK-NEXT:  addss %xmm0, %xmm1
 ; CHECK:       retq
 	%call = call i32 @bar(i32 %a0, i32 %a1, float %b0) #0

Modified: llvm/trunk/test/MC/Disassembler/X86/x86-64.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/Disassembler/X86/x86-64.txt?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/MC/Disassembler/X86/x86-64.txt (original)
+++ llvm/trunk/test/MC/Disassembler/X86/x86-64.txt Mon May  6 14:39:51 2019
@@ -612,7 +612,7 @@
 0x62 0xb1 0x7c 0x00 0x58 0x08
 
 # Make sure we ignore EVEX.X when modrm.rm encodes a GPR.
-#CHECK: vcvtusi2sdq %rax, %xmm1, %xmm1
+#CHECK: vcvtusi2sd %rax, %xmm1, %xmm1
 0x62 0xb1 0xf7 0x08 0x7b 0xc8
 
 # Make sure we ignore EVEX.X when modrm.rm encodes a k-register.

Modified: llvm/trunk/test/MC/X86/AVX-64.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/X86/AVX-64.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/MC/X86/AVX-64.s (original)
+++ llvm/trunk/test/MC/X86/AVX-64.s Mon May  6 14:39:51 2019
@@ -2928,11 +2928,11 @@ vcvtsi2sdl 64(%rdx), %xmm15, %xmm15
 // CHECK: encoding: [0xc5,0xcb,0x2a,0x72,0x40]      
 vcvtsi2sdl 64(%rdx), %xmm6, %xmm6 
 
-// CHECK: vcvtsi2sdl %r13d, %xmm15, %xmm15 
+// CHECK: vcvtsi2sd %r13d, %xmm15, %xmm15 
 // CHECK: encoding: [0xc4,0x41,0x03,0x2a,0xfd]      
 vcvtsi2sdl %r13d, %xmm15, %xmm15 
 
-// CHECK: vcvtsi2sdl %r13d, %xmm6, %xmm6 
+// CHECK: vcvtsi2sd %r13d, %xmm6, %xmm6 
 // CHECK: encoding: [0xc4,0xc1,0x4b,0x2a,0xf5]      
 vcvtsi2sdl %r13d, %xmm6, %xmm6 
 
@@ -2984,11 +2984,11 @@ vcvtsi2sdq 64(%rdx), %xmm15, %xmm15
 // CHECK: encoding: [0xc4,0xe1,0xcb,0x2a,0x72,0x40]      
 vcvtsi2sdq 64(%rdx), %xmm6, %xmm6 
 
-// CHECK: vcvtsi2sdq %r15, %xmm15, %xmm15 
+// CHECK: vcvtsi2sd %r15, %xmm15, %xmm15 
 // CHECK: encoding: [0xc4,0x41,0x83,0x2a,0xff]      
 vcvtsi2sdq %r15, %xmm15, %xmm15 
 
-// CHECK: vcvtsi2sdq %r15, %xmm6, %xmm6 
+// CHECK: vcvtsi2sd %r15, %xmm6, %xmm6 
 // CHECK: encoding: [0xc4,0xc1,0xcb,0x2a,0xf7]      
 vcvtsi2sdq %r15, %xmm6, %xmm6 
 
@@ -3040,11 +3040,11 @@ vcvtsi2ssl 64(%rdx), %xmm15, %xmm15
 // CHECK: encoding: [0xc5,0xca,0x2a,0x72,0x40]      
 vcvtsi2ssl 64(%rdx), %xmm6, %xmm6 
 
-// CHECK: vcvtsi2ssl %r13d, %xmm15, %xmm15 
+// CHECK: vcvtsi2ss %r13d, %xmm15, %xmm15 
 // CHECK: encoding: [0xc4,0x41,0x02,0x2a,0xfd]      
 vcvtsi2ssl %r13d, %xmm15, %xmm15 
 
-// CHECK: vcvtsi2ssl %r13d, %xmm6, %xmm6 
+// CHECK: vcvtsi2ss %r13d, %xmm6, %xmm6 
 // CHECK: encoding: [0xc4,0xc1,0x4a,0x2a,0xf5]      
 vcvtsi2ssl %r13d, %xmm6, %xmm6 
 
@@ -3096,11 +3096,11 @@ vcvtsi2ssq 64(%rdx), %xmm15, %xmm15
 // CHECK: encoding: [0xc4,0xe1,0xca,0x2a,0x72,0x40]      
 vcvtsi2ssq 64(%rdx), %xmm6, %xmm6 
 
-// CHECK: vcvtsi2ssq %r15, %xmm15, %xmm15 
+// CHECK: vcvtsi2ss %r15, %xmm15, %xmm15 
 // CHECK: encoding: [0xc4,0x41,0x82,0x2a,0xff]      
 vcvtsi2ssq %r15, %xmm15, %xmm15 
 
-// CHECK: vcvtsi2ssq %r15, %xmm6, %xmm6 
+// CHECK: vcvtsi2ss %r15, %xmm6, %xmm6 
 // CHECK: encoding: [0xc4,0xc1,0xca,0x2a,0xf7]      
 vcvtsi2ssq %r15, %xmm6, %xmm6 
 

Modified: llvm/trunk/test/MC/X86/AVX512F_SCALAR-64.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/X86/AVX512F_SCALAR-64.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/MC/X86/AVX512F_SCALAR-64.s (original)
+++ llvm/trunk/test/MC/X86/AVX512F_SCALAR-64.s Mon May  6 14:39:51 2019
@@ -1472,11 +1472,11 @@ vcvtsi2sdl 485498096, %xmm15, %xmm15
 // CHECK: encoding: [0xc5,0xf3,0x2a,0x0c,0x25,0xf0,0x1c,0xf0,0x1c]
 vcvtsi2sdl 485498096, %xmm1, %xmm1
 
-// CHECK: vcvtsi2sdl %r13d, %xmm15, %xmm15
+// CHECK: vcvtsi2sd %r13d, %xmm15, %xmm15
 // CHECK: encoding: [0xc4,0x41,0x03,0x2a,0xfd]
 vcvtsi2sdl %r13d, %xmm15, %xmm15
 
-// CHECK: vcvtsi2sdl %r13d, %xmm1, %xmm1
+// CHECK: vcvtsi2sd %r13d, %xmm1, %xmm1
 // CHECK: encoding: [0xc4,0xc1,0x73,0x2a,0xcd]
 vcvtsi2sdl %r13d, %xmm1, %xmm1
 
@@ -1528,43 +1528,43 @@ vcvtsi2sdq 512(%rdx), %xmm15, %xmm15
 // CHECK: encoding: [0xc4,0xe1,0xf3,0x2a,0x8a,0x00,0x02,0x00,0x00]
 vcvtsi2sdq 512(%rdx), %xmm1, %xmm1
 
-// CHECK: vcvtsi2sdq %r15, {rd-sae}, %xmm15, %xmm15
+// CHECK: vcvtsi2sd %r15, {rd-sae}, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x87,0x38,0x2a,0xff]
 vcvtsi2sdq %r15, {rd-sae}, %xmm15, %xmm15
 
-// CHECK: vcvtsi2sdq %r15, {rd-sae}, %xmm1, %xmm1
+// CHECK: vcvtsi2sd %r15, {rd-sae}, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0xf7,0x38,0x2a,0xcf]
 vcvtsi2sdq %r15, {rd-sae}, %xmm1, %xmm1
 
-// CHECK: vcvtsi2sdq %r15, {rn-sae}, %xmm15, %xmm15
+// CHECK: vcvtsi2sd %r15, {rn-sae}, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x87,0x18,0x2a,0xff]
 vcvtsi2sdq %r15, {rn-sae}, %xmm15, %xmm15
 
-// CHECK: vcvtsi2sdq %r15, {rn-sae}, %xmm1, %xmm1
+// CHECK: vcvtsi2sd %r15, {rn-sae}, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0xf7,0x18,0x2a,0xcf]
 vcvtsi2sdq %r15, {rn-sae}, %xmm1, %xmm1
 
-// CHECK: vcvtsi2sdq %r15, {ru-sae}, %xmm15, %xmm15
+// CHECK: vcvtsi2sd %r15, {ru-sae}, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x87,0x58,0x2a,0xff]
 vcvtsi2sdq %r15, {ru-sae}, %xmm15, %xmm15
 
-// CHECK: vcvtsi2sdq %r15, {ru-sae}, %xmm1, %xmm1
+// CHECK: vcvtsi2sd %r15, {ru-sae}, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0xf7,0x58,0x2a,0xcf]
 vcvtsi2sdq %r15, {ru-sae}, %xmm1, %xmm1
 
-// CHECK: vcvtsi2sdq %r15, {rz-sae}, %xmm15, %xmm15
+// CHECK: vcvtsi2sd %r15, {rz-sae}, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x87,0x78,0x2a,0xff]
 vcvtsi2sdq %r15, {rz-sae}, %xmm15, %xmm15
 
-// CHECK: vcvtsi2sdq %r15, {rz-sae}, %xmm1, %xmm1
+// CHECK: vcvtsi2sd %r15, {rz-sae}, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0xf7,0x78,0x2a,0xcf]
 vcvtsi2sdq %r15, {rz-sae}, %xmm1, %xmm1
 
-// CHECK: vcvtsi2sdq %r15, %xmm15, %xmm15
+// CHECK: vcvtsi2sd %r15, %xmm15, %xmm15
 // CHECK: encoding: [0xc4,0x41,0x83,0x2a,0xff]
 vcvtsi2sdq %r15, %xmm15, %xmm15
 
-// CHECK: vcvtsi2sdq %r15, %xmm1, %xmm1
+// CHECK: vcvtsi2sd %r15, %xmm1, %xmm1
 // CHECK: encoding: [0xc4,0xc1,0xf3,0x2a,0xcf]
 vcvtsi2sdq %r15, %xmm1, %xmm1
 
@@ -1616,43 +1616,43 @@ vcvtsi2ssl 485498096, %xmm15, %xmm15
 // CHECK: encoding: [0xc5,0xf2,0x2a,0x0c,0x25,0xf0,0x1c,0xf0,0x1c]
 vcvtsi2ssl 485498096, %xmm1, %xmm1
 
-// CHECK: vcvtsi2ssl %r13d, {rd-sae}, %xmm15, %xmm15
+// CHECK: vcvtsi2ss %r13d, {rd-sae}, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x06,0x38,0x2a,0xfd]
 vcvtsi2ssl %r13d, {rd-sae}, %xmm15, %xmm15
 
-// CHECK: vcvtsi2ssl %r13d, {rd-sae}, %xmm1, %xmm1
+// CHECK: vcvtsi2ss %r13d, {rd-sae}, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0x76,0x38,0x2a,0xcd]
 vcvtsi2ssl %r13d, {rd-sae}, %xmm1, %xmm1
 
-// CHECK: vcvtsi2ssl %r13d, {rn-sae}, %xmm15, %xmm15
+// CHECK: vcvtsi2ss %r13d, {rn-sae}, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x06,0x18,0x2a,0xfd]
 vcvtsi2ssl %r13d, {rn-sae}, %xmm15, %xmm15
 
-// CHECK: vcvtsi2ssl %r13d, {rn-sae}, %xmm1, %xmm1
+// CHECK: vcvtsi2ss %r13d, {rn-sae}, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0x76,0x18,0x2a,0xcd]
 vcvtsi2ssl %r13d, {rn-sae}, %xmm1, %xmm1
 
-// CHECK: vcvtsi2ssl %r13d, {ru-sae}, %xmm15, %xmm15
+// CHECK: vcvtsi2ss %r13d, {ru-sae}, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x06,0x58,0x2a,0xfd]
 vcvtsi2ssl %r13d, {ru-sae}, %xmm15, %xmm15
 
-// CHECK: vcvtsi2ssl %r13d, {ru-sae}, %xmm1, %xmm1
+// CHECK: vcvtsi2ss %r13d, {ru-sae}, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0x76,0x58,0x2a,0xcd]
 vcvtsi2ssl %r13d, {ru-sae}, %xmm1, %xmm1
 
-// CHECK: vcvtsi2ssl %r13d, {rz-sae}, %xmm15, %xmm15
+// CHECK: vcvtsi2ss %r13d, {rz-sae}, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x06,0x78,0x2a,0xfd]
 vcvtsi2ssl %r13d, {rz-sae}, %xmm15, %xmm15
 
-// CHECK: vcvtsi2ssl %r13d, {rz-sae}, %xmm1, %xmm1
+// CHECK: vcvtsi2ss %r13d, {rz-sae}, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0x76,0x78,0x2a,0xcd]
 vcvtsi2ssl %r13d, {rz-sae}, %xmm1, %xmm1
 
-// CHECK: vcvtsi2ssl %r13d, %xmm15, %xmm15
+// CHECK: vcvtsi2ss %r13d, %xmm15, %xmm15
 // CHECK: encoding: [0xc4,0x41,0x02,0x2a,0xfd]
 vcvtsi2ssl %r13d, %xmm15, %xmm15
 
-// CHECK: vcvtsi2ssl %r13d, %xmm1, %xmm1
+// CHECK: vcvtsi2ss %r13d, %xmm1, %xmm1
 // CHECK: encoding: [0xc4,0xc1,0x72,0x2a,0xcd]
 vcvtsi2ssl %r13d, %xmm1, %xmm1
 
@@ -1704,43 +1704,43 @@ vcvtsi2ssq 512(%rdx), %xmm15, %xmm15
 // CHECK: encoding: [0xc4,0xe1,0xf2,0x2a,0x8a,0x00,0x02,0x00,0x00]
 vcvtsi2ssq 512(%rdx), %xmm1, %xmm1
 
-// CHECK: vcvtsi2ssq %r15, {rd-sae}, %xmm15, %xmm15
+// CHECK: vcvtsi2ss %r15, {rd-sae}, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x86,0x38,0x2a,0xff]
 vcvtsi2ssq %r15, {rd-sae}, %xmm15, %xmm15
 
-// CHECK: vcvtsi2ssq %r15, {rd-sae}, %xmm1, %xmm1
+// CHECK: vcvtsi2ss %r15, {rd-sae}, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0xf6,0x38,0x2a,0xcf]
 vcvtsi2ssq %r15, {rd-sae}, %xmm1, %xmm1
 
-// CHECK: vcvtsi2ssq %r15, {rn-sae}, %xmm15, %xmm15
+// CHECK: vcvtsi2ss %r15, {rn-sae}, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x86,0x18,0x2a,0xff]
 vcvtsi2ssq %r15, {rn-sae}, %xmm15, %xmm15
 
-// CHECK: vcvtsi2ssq %r15, {rn-sae}, %xmm1, %xmm1
+// CHECK: vcvtsi2ss %r15, {rn-sae}, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0xf6,0x18,0x2a,0xcf]
 vcvtsi2ssq %r15, {rn-sae}, %xmm1, %xmm1
 
-// CHECK: vcvtsi2ssq %r15, {ru-sae}, %xmm15, %xmm15
+// CHECK: vcvtsi2ss %r15, {ru-sae}, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x86,0x58,0x2a,0xff]
 vcvtsi2ssq %r15, {ru-sae}, %xmm15, %xmm15
 
-// CHECK: vcvtsi2ssq %r15, {ru-sae}, %xmm1, %xmm1
+// CHECK: vcvtsi2ss %r15, {ru-sae}, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0xf6,0x58,0x2a,0xcf]
 vcvtsi2ssq %r15, {ru-sae}, %xmm1, %xmm1
 
-// CHECK: vcvtsi2ssq %r15, {rz-sae}, %xmm15, %xmm15
+// CHECK: vcvtsi2ss %r15, {rz-sae}, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x86,0x78,0x2a,0xff]
 vcvtsi2ssq %r15, {rz-sae}, %xmm15, %xmm15
 
-// CHECK: vcvtsi2ssq %r15, {rz-sae}, %xmm1, %xmm1
+// CHECK: vcvtsi2ss %r15, {rz-sae}, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0xf6,0x78,0x2a,0xcf]
 vcvtsi2ssq %r15, {rz-sae}, %xmm1, %xmm1
 
-// CHECK: vcvtsi2ssq %r15, %xmm15, %xmm15
+// CHECK: vcvtsi2ss %r15, %xmm15, %xmm15
 // CHECK: encoding: [0xc4,0x41,0x82,0x2a,0xff]
 vcvtsi2ssq %r15, %xmm15, %xmm15
 
-// CHECK: vcvtsi2ssq %r15, %xmm1, %xmm1
+// CHECK: vcvtsi2ss %r15, %xmm1, %xmm1
 // CHECK: encoding: [0xc4,0xc1,0xf2,0x2a,0xcf]
 vcvtsi2ssq %r15, %xmm1, %xmm1
 
@@ -2560,11 +2560,11 @@ vcvtusi2sdl 485498096, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0xf1,0x77,0x08,0x7b,0x0c,0x25,0xf0,0x1c,0xf0,0x1c]
 vcvtusi2sdl 485498096, %xmm1, %xmm1
 
-// CHECK: vcvtusi2sdl %r13d, %xmm15, %xmm15
+// CHECK: vcvtusi2sd %r13d, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x07,0x08,0x7b,0xfd]
 vcvtusi2sdl %r13d, %xmm15, %xmm15
 
-// CHECK: vcvtusi2sdl %r13d, %xmm1, %xmm1
+// CHECK: vcvtusi2sd %r13d, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0x77,0x08,0x7b,0xcd]
 vcvtusi2sdl %r13d, %xmm1, %xmm1
 
@@ -2616,43 +2616,43 @@ vcvtusi2sdq 512(%rdx), %xmm15, %xmm15
 // CHECK: encoding: [0x62,0xf1,0xf7,0x08,0x7b,0x4a,0x40]
 vcvtusi2sdq 512(%rdx), %xmm1, %xmm1
 
-// CHECK: vcvtusi2sdq %r15, {rd-sae}, %xmm15, %xmm15
+// CHECK: vcvtusi2sd %r15, {rd-sae}, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x87,0x38,0x7b,0xff]
 vcvtusi2sdq %r15, {rd-sae}, %xmm15, %xmm15
 
-// CHECK: vcvtusi2sdq %r15, {rd-sae}, %xmm1, %xmm1
+// CHECK: vcvtusi2sd %r15, {rd-sae}, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0xf7,0x38,0x7b,0xcf]
 vcvtusi2sdq %r15, {rd-sae}, %xmm1, %xmm1
 
-// CHECK: vcvtusi2sdq %r15, {rn-sae}, %xmm15, %xmm15
+// CHECK: vcvtusi2sd %r15, {rn-sae}, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x87,0x18,0x7b,0xff]
 vcvtusi2sdq %r15, {rn-sae}, %xmm15, %xmm15
 
-// CHECK: vcvtusi2sdq %r15, {rn-sae}, %xmm1, %xmm1
+// CHECK: vcvtusi2sd %r15, {rn-sae}, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0xf7,0x18,0x7b,0xcf]
 vcvtusi2sdq %r15, {rn-sae}, %xmm1, %xmm1
 
-// CHECK: vcvtusi2sdq %r15, {ru-sae}, %xmm15, %xmm15
+// CHECK: vcvtusi2sd %r15, {ru-sae}, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x87,0x58,0x7b,0xff]
 vcvtusi2sdq %r15, {ru-sae}, %xmm15, %xmm15
 
-// CHECK: vcvtusi2sdq %r15, {ru-sae}, %xmm1, %xmm1
+// CHECK: vcvtusi2sd %r15, {ru-sae}, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0xf7,0x58,0x7b,0xcf]
 vcvtusi2sdq %r15, {ru-sae}, %xmm1, %xmm1
 
-// CHECK: vcvtusi2sdq %r15, {rz-sae}, %xmm15, %xmm15
+// CHECK: vcvtusi2sd %r15, {rz-sae}, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x87,0x78,0x7b,0xff]
 vcvtusi2sdq %r15, {rz-sae}, %xmm15, %xmm15
 
-// CHECK: vcvtusi2sdq %r15, {rz-sae}, %xmm1, %xmm1
+// CHECK: vcvtusi2sd %r15, {rz-sae}, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0xf7,0x78,0x7b,0xcf]
 vcvtusi2sdq %r15, {rz-sae}, %xmm1, %xmm1
 
-// CHECK: vcvtusi2sdq %r15, %xmm15, %xmm15
+// CHECK: vcvtusi2sd %r15, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x87,0x08,0x7b,0xff]
 vcvtusi2sdq %r15, %xmm15, %xmm15
 
-// CHECK: vcvtusi2sdq %r15, %xmm1, %xmm1
+// CHECK: vcvtusi2sd %r15, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0xf7,0x08,0x7b,0xcf]
 vcvtusi2sdq %r15, %xmm1, %xmm1
 
@@ -2704,43 +2704,43 @@ vcvtusi2ssl 485498096, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0xf1,0x76,0x08,0x7b,0x0c,0x25,0xf0,0x1c,0xf0,0x1c]
 vcvtusi2ssl 485498096, %xmm1, %xmm1
 
-// CHECK: vcvtusi2ssl %r13d, {rd-sae}, %xmm15, %xmm15
+// CHECK: vcvtusi2ss %r13d, {rd-sae}, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x06,0x38,0x7b,0xfd]
 vcvtusi2ssl %r13d, {rd-sae}, %xmm15, %xmm15
 
-// CHECK: vcvtusi2ssl %r13d, {rd-sae}, %xmm1, %xmm1
+// CHECK: vcvtusi2ss %r13d, {rd-sae}, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0x76,0x38,0x7b,0xcd]
 vcvtusi2ssl %r13d, {rd-sae}, %xmm1, %xmm1
 
-// CHECK: vcvtusi2ssl %r13d, {rn-sae}, %xmm15, %xmm15
+// CHECK: vcvtusi2ss %r13d, {rn-sae}, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x06,0x18,0x7b,0xfd]
 vcvtusi2ssl %r13d, {rn-sae}, %xmm15, %xmm15
 
-// CHECK: vcvtusi2ssl %r13d, {rn-sae}, %xmm1, %xmm1
+// CHECK: vcvtusi2ss %r13d, {rn-sae}, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0x76,0x18,0x7b,0xcd]
 vcvtusi2ssl %r13d, {rn-sae}, %xmm1, %xmm1
 
-// CHECK: vcvtusi2ssl %r13d, {ru-sae}, %xmm15, %xmm15
+// CHECK: vcvtusi2ss %r13d, {ru-sae}, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x06,0x58,0x7b,0xfd]
 vcvtusi2ssl %r13d, {ru-sae}, %xmm15, %xmm15
 
-// CHECK: vcvtusi2ssl %r13d, {ru-sae}, %xmm1, %xmm1
+// CHECK: vcvtusi2ss %r13d, {ru-sae}, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0x76,0x58,0x7b,0xcd]
 vcvtusi2ssl %r13d, {ru-sae}, %xmm1, %xmm1
 
-// CHECK: vcvtusi2ssl %r13d, {rz-sae}, %xmm15, %xmm15
+// CHECK: vcvtusi2ss %r13d, {rz-sae}, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x06,0x78,0x7b,0xfd]
 vcvtusi2ssl %r13d, {rz-sae}, %xmm15, %xmm15
 
-// CHECK: vcvtusi2ssl %r13d, {rz-sae}, %xmm1, %xmm1
+// CHECK: vcvtusi2ss %r13d, {rz-sae}, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0x76,0x78,0x7b,0xcd]
 vcvtusi2ssl %r13d, {rz-sae}, %xmm1, %xmm1
 
-// CHECK: vcvtusi2ssl %r13d, %xmm15, %xmm15
+// CHECK: vcvtusi2ss %r13d, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x06,0x08,0x7b,0xfd]
 vcvtusi2ssl %r13d, %xmm15, %xmm15
 
-// CHECK: vcvtusi2ssl %r13d, %xmm1, %xmm1
+// CHECK: vcvtusi2ss %r13d, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0x76,0x08,0x7b,0xcd]
 vcvtusi2ssl %r13d, %xmm1, %xmm1
 
@@ -2792,43 +2792,43 @@ vcvtusi2ssq 512(%rdx), %xmm15, %xmm15
 // CHECK: encoding: [0x62,0xf1,0xf6,0x08,0x7b,0x4a,0x40]
 vcvtusi2ssq 512(%rdx), %xmm1, %xmm1
 
-// CHECK: vcvtusi2ssq %r15, {rd-sae}, %xmm15, %xmm15
+// CHECK: vcvtusi2ss %r15, {rd-sae}, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x86,0x38,0x7b,0xff]
 vcvtusi2ssq %r15, {rd-sae}, %xmm15, %xmm15
 
-// CHECK: vcvtusi2ssq %r15, {rd-sae}, %xmm1, %xmm1
+// CHECK: vcvtusi2ss %r15, {rd-sae}, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0xf6,0x38,0x7b,0xcf]
 vcvtusi2ssq %r15, {rd-sae}, %xmm1, %xmm1
 
-// CHECK: vcvtusi2ssq %r15, {rn-sae}, %xmm15, %xmm15
+// CHECK: vcvtusi2ss %r15, {rn-sae}, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x86,0x18,0x7b,0xff]
 vcvtusi2ssq %r15, {rn-sae}, %xmm15, %xmm15
 
-// CHECK: vcvtusi2ssq %r15, {rn-sae}, %xmm1, %xmm1
+// CHECK: vcvtusi2ss %r15, {rn-sae}, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0xf6,0x18,0x7b,0xcf]
 vcvtusi2ssq %r15, {rn-sae}, %xmm1, %xmm1
 
-// CHECK: vcvtusi2ssq %r15, {ru-sae}, %xmm15, %xmm15
+// CHECK: vcvtusi2ss %r15, {ru-sae}, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x86,0x58,0x7b,0xff]
 vcvtusi2ssq %r15, {ru-sae}, %xmm15, %xmm15
 
-// CHECK: vcvtusi2ssq %r15, {ru-sae}, %xmm1, %xmm1
+// CHECK: vcvtusi2ss %r15, {ru-sae}, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0xf6,0x58,0x7b,0xcf]
 vcvtusi2ssq %r15, {ru-sae}, %xmm1, %xmm1
 
-// CHECK: vcvtusi2ssq %r15, {rz-sae}, %xmm15, %xmm15
+// CHECK: vcvtusi2ss %r15, {rz-sae}, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x86,0x78,0x7b,0xff]
 vcvtusi2ssq %r15, {rz-sae}, %xmm15, %xmm15
 
-// CHECK: vcvtusi2ssq %r15, {rz-sae}, %xmm1, %xmm1
+// CHECK: vcvtusi2ss %r15, {rz-sae}, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0xf6,0x78,0x7b,0xcf]
 vcvtusi2ssq %r15, {rz-sae}, %xmm1, %xmm1
 
-// CHECK: vcvtusi2ssq %r15, %xmm15, %xmm15
+// CHECK: vcvtusi2ss %r15, %xmm15, %xmm15
 // CHECK: encoding: [0x62,0x51,0x86,0x08,0x7b,0xff]
 vcvtusi2ssq %r15, %xmm15, %xmm15
 
-// CHECK: vcvtusi2ssq %r15, %xmm1, %xmm1
+// CHECK: vcvtusi2ss %r15, %xmm1, %xmm1
 // CHECK: encoding: [0x62,0xd1,0xf6,0x08,0x7b,0xcf]
 vcvtusi2ssq %r15, %xmm1, %xmm1
 

Modified: llvm/trunk/test/MC/X86/SSE-64.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/X86/SSE-64.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/MC/X86/SSE-64.s (original)
+++ llvm/trunk/test/MC/X86/SSE-64.s Mon May  6 14:39:51 2019
@@ -272,7 +272,7 @@ cvtsi2ssl 64(%rdx,%rax), %xmm6
 // CHECK: encoding: [0xf3,0x0f,0x2a,0x72,0x40]
 cvtsi2ssl 64(%rdx), %xmm6
 
-// CHECK: cvtsi2ssl %r13d, %xmm6
+// CHECK: cvtsi2ss %r13d, %xmm6
 // CHECK: encoding: [0xf3,0x41,0x0f,0x2a,0xf5]
 cvtsi2ssl %r13d, %xmm6
 
@@ -300,7 +300,7 @@ cvtsi2ssq 64(%rdx,%rax), %xmm6
 // CHECK: encoding: [0xf3,0x48,0x0f,0x2a,0x72,0x40]
 cvtsi2ssq 64(%rdx), %xmm6
 
-// CHECK: cvtsi2ssq %r15, %xmm6
+// CHECK: cvtsi2ss %r15, %xmm6
 // CHECK: encoding: [0xf3,0x49,0x0f,0x2a,0xf7]
 cvtsi2ssq %r15, %xmm6
 

Modified: llvm/trunk/test/MC/X86/SSE2-64.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/X86/SSE2-64.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/MC/X86/SSE2-64.s (original)
+++ llvm/trunk/test/MC/X86/SSE2-64.s Mon May  6 14:39:51 2019
@@ -524,7 +524,7 @@ cvtsi2sdl 64(%rdx,%rax), %xmm6
 // CHECK: encoding: [0xf2,0x0f,0x2a,0x72,0x40]
 cvtsi2sdl 64(%rdx), %xmm6
 
-// CHECK: cvtsi2sdl %r13d, %xmm6
+// CHECK: cvtsi2sd %r13d, %xmm6
 // CHECK: encoding: [0xf2,0x41,0x0f,0x2a,0xf5]
 cvtsi2sdl %r13d, %xmm6
 
@@ -552,7 +552,7 @@ cvtsi2sdq 64(%rdx,%rax), %xmm6
 // CHECK: encoding: [0xf2,0x48,0x0f,0x2a,0x72,0x40]
 cvtsi2sdq 64(%rdx), %xmm6
 
-// CHECK: cvtsi2sdq %r15, %xmm6
+// CHECK: cvtsi2sd %r15, %xmm6
 // CHECK: encoding: [0xf2,0x49,0x0f,0x2a,0xf7]
 cvtsi2sdq %r15, %xmm6
 

Modified: llvm/trunk/test/MC/X86/avx512-encodings.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/X86/avx512-encodings.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/MC/X86/avx512-encodings.s (original)
+++ llvm/trunk/test/MC/X86/avx512-encodings.s Mon May  6 14:39:51 2019
@@ -8796,15 +8796,15 @@ vpermilpd $0x23, 0x400(%rbx), %zmm2
 // CHECK:  encoding: [0x62,0xe2,0x1d,0x50,0x36,0xb2,0xfc,0xfd,0xff,0xff]
           vpermd -516(%rdx){1to16}, %zmm28, %zmm22
 
-// CHECK:  vcvtsi2sdl %eax, %xmm10, %xmm7
+// CHECK:  vcvtsi2sd %eax, %xmm10, %xmm7
 // CHECK:  encoding: [0xc5,0xab,0x2a,0xf8]
           vcvtsi2sd %eax, %xmm10, %xmm7
 
-// CHECK: vcvtsi2sdl %ebp, %xmm10, %xmm7
+// CHECK: vcvtsi2sd %ebp, %xmm10, %xmm7
 // CHECK:  encoding: [0xc5,0xab,0x2a,0xfd]
           vcvtsi2sd %ebp, %xmm10, %xmm7
 
-// CHECK: vcvtsi2sdl %r13d, %xmm10, %xmm7
+// CHECK: vcvtsi2sd %r13d, %xmm10, %xmm7
 // CHECK:  encoding: [0xc4,0xc1,0x2b,0x2a,0xfd]
           vcvtsi2sd %r13d, %xmm10, %xmm7
 
@@ -8836,43 +8836,43 @@ vpermilpd $0x23, 0x400(%rbx), %zmm2
 // CHECK:  encoding: [0xc5,0xab,0x2a,0xba,0xfc,0xfd,0xff,0xff]
           vcvtsi2sd -516(%rdx), %xmm10, %xmm7
 
-// CHECK: vcvtsi2sdq %rax, %xmm12, %xmm29
+// CHECK: vcvtsi2sd %rax, %xmm12, %xmm29
 // CHECK:  encoding: [0x62,0x61,0x9f,0x08,0x2a,0xe8]
           vcvtsi2sd %rax, %xmm12, %xmm29
 
-// CHECK: vcvtsi2sdq %rax,  {rn-sae}, %xmm12, %xmm29
+// CHECK: vcvtsi2sd %rax,  {rn-sae}, %xmm12, %xmm29
 // CHECK:  encoding: [0x62,0x61,0x9f,0x18,0x2a,0xe8]
           vcvtsi2sd %rax,  {rn-sae}, %xmm12, %xmm29
 
-// CHECK: vcvtsi2sdq %rax,  {ru-sae}, %xmm12, %xmm29
+// CHECK: vcvtsi2sd %rax,  {ru-sae}, %xmm12, %xmm29
 // CHECK:  encoding: [0x62,0x61,0x9f,0x58,0x2a,0xe8]
           vcvtsi2sd %rax,  {ru-sae}, %xmm12, %xmm29
 
-// CHECK: vcvtsi2sdq %rax,  {rd-sae}, %xmm12, %xmm29
+// CHECK: vcvtsi2sd %rax,  {rd-sae}, %xmm12, %xmm29
 // CHECK:  encoding: [0x62,0x61,0x9f,0x38,0x2a,0xe8]
           vcvtsi2sd %rax,  {rd-sae}, %xmm12, %xmm29
 
-// CHECK: vcvtsi2sdq %rax,  {rz-sae}, %xmm12, %xmm29
+// CHECK: vcvtsi2sd %rax,  {rz-sae}, %xmm12, %xmm29
 // CHECK:  encoding: [0x62,0x61,0x9f,0x78,0x2a,0xe8]
           vcvtsi2sd %rax,  {rz-sae}, %xmm12, %xmm29
 
-// CHECK: vcvtsi2sdq %r8, %xmm12, %xmm29
+// CHECK: vcvtsi2sd %r8, %xmm12, %xmm29
 // CHECK:  encoding: [0x62,0x41,0x9f,0x08,0x2a,0xe8]
           vcvtsi2sd %r8, %xmm12, %xmm29
 
-// CHECK: vcvtsi2sdq %r8,  {rn-sae}, %xmm12, %xmm29
+// CHECK: vcvtsi2sd %r8,  {rn-sae}, %xmm12, %xmm29
 // CHECK:  encoding: [0x62,0x41,0x9f,0x18,0x2a,0xe8]
           vcvtsi2sd %r8,  {rn-sae}, %xmm12, %xmm29
 
-// CHECK: vcvtsi2sdq %r8,  {ru-sae}, %xmm12, %xmm29
+// CHECK: vcvtsi2sd %r8,  {ru-sae}, %xmm12, %xmm29
 // CHECK:  encoding: [0x62,0x41,0x9f,0x58,0x2a,0xe8]
           vcvtsi2sd %r8,  {ru-sae}, %xmm12, %xmm29
 
-// CHECK: vcvtsi2sdq %r8,  {rd-sae}, %xmm12, %xmm29
+// CHECK: vcvtsi2sd %r8,  {rd-sae}, %xmm12, %xmm29
 // CHECK:  encoding: [0x62,0x41,0x9f,0x38,0x2a,0xe8]
           vcvtsi2sd %r8,  {rd-sae}, %xmm12, %xmm29
 
-// CHECK: vcvtsi2sdq %r8,  {rz-sae}, %xmm12, %xmm29
+// CHECK: vcvtsi2sd %r8,  {rz-sae}, %xmm12, %xmm29
 // CHECK:  encoding: [0x62,0x41,0x9f,0x78,0x2a,0xe8]
           vcvtsi2sd %r8,  {rz-sae}, %xmm12, %xmm29
 
@@ -8900,63 +8900,63 @@ vpermilpd $0x23, 0x400(%rbx), %zmm2
 // CHECK:  encoding: [0x62,0x61,0x9f,0x08,0x2a,0xaa,0xf8,0xfb,0xff,0xff]
           vcvtsi2sdq -1032(%rdx), %xmm12, %xmm29
 
-// CHECK: vcvtsi2ssl %eax, %xmm10, %xmm15
+// CHECK: vcvtsi2ss %eax, %xmm10, %xmm15
 // CHECK:  encoding: [0xc5,0x2a,0x2a,0xf8]
           vcvtsi2ss %eax, %xmm10, %xmm15
 
-// CHECK: vcvtsi2ssl %eax,  {rn-sae}, %xmm10, %xmm15
+// CHECK: vcvtsi2ss %eax,  {rn-sae}, %xmm10, %xmm15
 // CHECK:  encoding: [0x62,0x71,0x2e,0x18,0x2a,0xf8]
           vcvtsi2ss %eax,  {rn-sae}, %xmm10, %xmm15
 
-// CHECK: vcvtsi2ssl %eax,  {ru-sae}, %xmm10, %xmm15
+// CHECK: vcvtsi2ss %eax,  {ru-sae}, %xmm10, %xmm15
 // CHECK:  encoding: [0x62,0x71,0x2e,0x58,0x2a,0xf8]
           vcvtsi2ss %eax,  {ru-sae}, %xmm10, %xmm15
 
-// CHECK: vcvtsi2ssl %eax,  {rd-sae}, %xmm10, %xmm15
+// CHECK: vcvtsi2ss %eax,  {rd-sae}, %xmm10, %xmm15
 // CHECK:  encoding: [0x62,0x71,0x2e,0x38,0x2a,0xf8]
           vcvtsi2ss %eax,  {rd-sae}, %xmm10, %xmm15
 
-// CHECK: vcvtsi2ssl %eax,  {rz-sae}, %xmm10, %xmm15
+// CHECK: vcvtsi2ss %eax,  {rz-sae}, %xmm10, %xmm15
 // CHECK:  encoding: [0x62,0x71,0x2e,0x78,0x2a,0xf8]
           vcvtsi2ss %eax,  {rz-sae}, %xmm10, %xmm15
 
-// CHECK: vcvtsi2ssl %ebp, %xmm10, %xmm15
+// CHECK: vcvtsi2ss %ebp, %xmm10, %xmm15
 // CHECK:  encoding: [0xc5,0x2a,0x2a,0xfd]
           vcvtsi2ss %ebp, %xmm10, %xmm15
 
-// CHECK: vcvtsi2ssl %ebp,  {rn-sae}, %xmm10, %xmm15
+// CHECK: vcvtsi2ss %ebp,  {rn-sae}, %xmm10, %xmm15
 // CHECK:  encoding: [0x62,0x71,0x2e,0x18,0x2a,0xfd]
           vcvtsi2ss %ebp,  {rn-sae}, %xmm10, %xmm15
 
-// CHECK: vcvtsi2ssl %ebp,  {ru-sae}, %xmm10, %xmm15
+// CHECK: vcvtsi2ss %ebp,  {ru-sae}, %xmm10, %xmm15
 // CHECK:  encoding: [0x62,0x71,0x2e,0x58,0x2a,0xfd]
           vcvtsi2ss %ebp,  {ru-sae}, %xmm10, %xmm15
 
-// CHECK: vcvtsi2ssl %ebp,  {rd-sae}, %xmm10, %xmm15
+// CHECK: vcvtsi2ss %ebp,  {rd-sae}, %xmm10, %xmm15
 // CHECK:  encoding: [0x62,0x71,0x2e,0x38,0x2a,0xfd]
           vcvtsi2ss %ebp,  {rd-sae}, %xmm10, %xmm15
 
-// CHECK: vcvtsi2ssl %ebp,  {rz-sae}, %xmm10, %xmm15
+// CHECK: vcvtsi2ss %ebp,  {rz-sae}, %xmm10, %xmm15
 // CHECK:  encoding: [0x62,0x71,0x2e,0x78,0x2a,0xfd]
           vcvtsi2ss %ebp,  {rz-sae}, %xmm10, %xmm15
 
-// CHECK: vcvtsi2ssl %r13d, %xmm10, %xmm15
+// CHECK: vcvtsi2ss %r13d, %xmm10, %xmm15
 // CHECK:  encoding: [0xc4,0x41,0x2a,0x2a,0xfd]
           vcvtsi2ss %r13d, %xmm10, %xmm15
 
-// CHECK: vcvtsi2ssl %r13d,  {rn-sae}, %xmm10, %xmm15
+// CHECK: vcvtsi2ss %r13d,  {rn-sae}, %xmm10, %xmm15
 // CHECK:  encoding: [0x62,0x51,0x2e,0x18,0x2a,0xfd]
           vcvtsi2ss %r13d,  {rn-sae}, %xmm10, %xmm15
 
-// CHECK: vcvtsi2ssl %r13d,  {ru-sae}, %xmm10, %xmm15
+// CHECK: vcvtsi2ss %r13d,  {ru-sae}, %xmm10, %xmm15
 // CHECK:  encoding: [0x62,0x51,0x2e,0x58,0x2a,0xfd]
           vcvtsi2ss %r13d,  {ru-sae}, %xmm10, %xmm15
 
-// CHECK: vcvtsi2ssl %r13d,  {rd-sae}, %xmm10, %xmm15
+// CHECK: vcvtsi2ss %r13d,  {rd-sae}, %xmm10, %xmm15
 // CHECK:  encoding: [0x62,0x51,0x2e,0x38,0x2a,0xfd]
           vcvtsi2ss %r13d,  {rd-sae}, %xmm10, %xmm15
 
-// CHECK: vcvtsi2ssl %r13d,  {rz-sae}, %xmm10, %xmm15
+// CHECK: vcvtsi2ss %r13d,  {rz-sae}, %xmm10, %xmm15
 // CHECK:  encoding: [0x62,0x51,0x2e,0x78,0x2a,0xfd]
           vcvtsi2ss %r13d,  {rz-sae}, %xmm10, %xmm15
 
@@ -8988,43 +8988,43 @@ vpermilpd $0x23, 0x400(%rbx), %zmm2
 // CHECK:  encoding: [0xc5,0x2a,0x2a,0xba,0xfc,0xfd,0xff,0xff]
           vcvtsi2ss -516(%rdx), %xmm10, %xmm15
 
-// CHECK: vcvtsi2ssq %rax, %xmm10, %xmm16
+// CHECK: vcvtsi2ss %rax, %xmm10, %xmm16
 // CHECK:  encoding: [0x62,0xe1,0xae,0x08,0x2a,0xc0]
           vcvtsi2ss %rax, %xmm10, %xmm16
 
-// CHECK: vcvtsi2ssq %rax,  {rn-sae}, %xmm10, %xmm16
+// CHECK: vcvtsi2ss %rax,  {rn-sae}, %xmm10, %xmm16
 // CHECK:  encoding: [0x62,0xe1,0xae,0x18,0x2a,0xc0]
           vcvtsi2ss %rax,  {rn-sae}, %xmm10, %xmm16
 
-// CHECK: vcvtsi2ssq %rax,  {ru-sae}, %xmm10, %xmm16
+// CHECK: vcvtsi2ss %rax,  {ru-sae}, %xmm10, %xmm16
 // CHECK:  encoding: [0x62,0xe1,0xae,0x58,0x2a,0xc0]
           vcvtsi2ss %rax,  {ru-sae}, %xmm10, %xmm16
 
-// CHECK: vcvtsi2ssq %rax,  {rd-sae}, %xmm10, %xmm16
+// CHECK: vcvtsi2ss %rax,  {rd-sae}, %xmm10, %xmm16
 // CHECK:  encoding: [0x62,0xe1,0xae,0x38,0x2a,0xc0]
           vcvtsi2ss %rax,  {rd-sae}, %xmm10, %xmm16
 
-// CHECK: vcvtsi2ssq %rax,  {rz-sae}, %xmm10, %xmm16
+// CHECK: vcvtsi2ss %rax,  {rz-sae}, %xmm10, %xmm16
 // CHECK:  encoding: [0x62,0xe1,0xae,0x78,0x2a,0xc0]
           vcvtsi2ss %rax,  {rz-sae}, %xmm10, %xmm16
 
-// CHECK: vcvtsi2ssq %r8, %xmm10, %xmm16
+// CHECK: vcvtsi2ss %r8, %xmm10, %xmm16
 // CHECK:  encoding: [0x62,0xc1,0xae,0x08,0x2a,0xc0]
           vcvtsi2ss %r8, %xmm10, %xmm16
 
-// CHECK: vcvtsi2ssq %r8,  {rn-sae}, %xmm10, %xmm16
+// CHECK: vcvtsi2ss %r8,  {rn-sae}, %xmm10, %xmm16
 // CHECK:  encoding: [0x62,0xc1,0xae,0x18,0x2a,0xc0]
           vcvtsi2ss %r8,  {rn-sae}, %xmm10, %xmm16
 
-// CHECK: vcvtsi2ssq %r8,  {ru-sae}, %xmm10, %xmm16
+// CHECK: vcvtsi2ss %r8,  {ru-sae}, %xmm10, %xmm16
 // CHECK:  encoding: [0x62,0xc1,0xae,0x58,0x2a,0xc0]
           vcvtsi2ss %r8,  {ru-sae}, %xmm10, %xmm16
 
-// CHECK: vcvtsi2ssq %r8,  {rd-sae}, %xmm10, %xmm16
+// CHECK: vcvtsi2ss %r8,  {rd-sae}, %xmm10, %xmm16
 // CHECK:  encoding: [0x62,0xc1,0xae,0x38,0x2a,0xc0]
           vcvtsi2ss %r8,  {rd-sae}, %xmm10, %xmm16
 
-// CHECK: vcvtsi2ssq %r8,  {rz-sae}, %xmm10, %xmm16
+// CHECK: vcvtsi2ss %r8,  {rz-sae}, %xmm10, %xmm16
 // CHECK:  encoding: [0x62,0xc1,0xae,0x78,0x2a,0xc0]
           vcvtsi2ss %r8,  {rz-sae}, %xmm10, %xmm16
 
@@ -9052,15 +9052,15 @@ vpermilpd $0x23, 0x400(%rbx), %zmm2
 // CHECK:  encoding: [0x62,0xe1,0xae,0x08,0x2a,0x82,0xf8,0xfb,0xff,0xff]
           vcvtsi2ssq -1032(%rdx), %xmm10, %xmm16
 
-// CHECK:  vcvtusi2sdl  %eax, %xmm1, %xmm19
+// CHECK:  vcvtusi2sd  %eax, %xmm1, %xmm19
 // CHECK:  encoding: [0x62,0xe1,0x77,0x08,0x7b,0xd8]
           vcvtusi2sd %eax, %xmm1, %xmm19
 
-// CHECK:  vcvtusi2sdl  %ebp, %xmm1, %xmm19
+// CHECK:  vcvtusi2sd  %ebp, %xmm1, %xmm19
 // CHECK:  encoding: [0x62,0xe1,0x77,0x08,0x7b,0xdd]
           vcvtusi2sd %ebp, %xmm1, %xmm19
 
-// CHECK:  vcvtusi2sdl  %r13d, %xmm1, %xmm19
+// CHECK:  vcvtusi2sd  %r13d, %xmm1, %xmm19
 // CHECK:  encoding: [0x62,0xc1,0x77,0x08,0x7b,0xdd]
           vcvtusi2sd %r13d, %xmm1, %xmm19
 
@@ -9092,43 +9092,43 @@ vpermilpd $0x23, 0x400(%rbx), %zmm2
 // CHECK:  encoding: [0x62,0xe1,0x77,0x08,0x7b,0x9a,0xfc,0xfd,0xff,0xff]
           vcvtusi2sd -516(%rdx), %xmm1, %xmm19
 
-// CHECK: vcvtusi2sdq %rax, %xmm26, %xmm14
+// CHECK: vcvtusi2sd %rax, %xmm26, %xmm14
 // CHECK:  encoding: [0x62,0x71,0xaf,0x00,0x7b,0xf0]
           vcvtusi2sd %rax, %xmm26, %xmm14
 
-// CHECK: vcvtusi2sdq %rax, {rn-sae}, %xmm26, %xmm14
+// CHECK: vcvtusi2sd %rax, {rn-sae}, %xmm26, %xmm14
 // CHECK:  encoding: [0x62,0x71,0xaf,0x10,0x7b,0xf0]
           vcvtusi2sd %rax, {rn-sae}, %xmm26, %xmm14
 
-// CHECK: vcvtusi2sdq %rax, {ru-sae}, %xmm26, %xmm14
+// CHECK: vcvtusi2sd %rax, {ru-sae}, %xmm26, %xmm14
 // CHECK:  encoding: [0x62,0x71,0xaf,0x50,0x7b,0xf0]
           vcvtusi2sd %rax, {ru-sae}, %xmm26, %xmm14
 
-// CHECK: vcvtusi2sdq %rax, {rd-sae}, %xmm26, %xmm14
+// CHECK: vcvtusi2sd %rax, {rd-sae}, %xmm26, %xmm14
 // CHECK:  encoding: [0x62,0x71,0xaf,0x30,0x7b,0xf0]
           vcvtusi2sd %rax, {rd-sae}, %xmm26, %xmm14
 
-// CHECK: vcvtusi2sdq %rax, {rz-sae}, %xmm26, %xmm14
+// CHECK: vcvtusi2sd %rax, {rz-sae}, %xmm26, %xmm14
 // CHECK:  encoding: [0x62,0x71,0xaf,0x70,0x7b,0xf0]
           vcvtusi2sd %rax, {rz-sae}, %xmm26, %xmm14
 
-// CHECK: vcvtusi2sdq %r8, %xmm26, %xmm14
+// CHECK: vcvtusi2sd %r8, %xmm26, %xmm14
 // CHECK:  encoding: [0x62,0x51,0xaf,0x00,0x7b,0xf0]
           vcvtusi2sd %r8, %xmm26, %xmm14
 
-// CHECK: vcvtusi2sdq %r8, {rn-sae}, %xmm26, %xmm14
+// CHECK: vcvtusi2sd %r8, {rn-sae}, %xmm26, %xmm14
 // CHECK:  encoding: [0x62,0x51,0xaf,0x10,0x7b,0xf0]
           vcvtusi2sd %r8, {rn-sae}, %xmm26, %xmm14
 
-// CHECK:  vcvtusi2sdq  %r8, {ru-sae}, %xmm26, %xmm14
+// CHECK:  vcvtusi2sd  %r8, {ru-sae}, %xmm26, %xmm14
 // CHECK:  encoding: [0x62,0x51,0xaf,0x50,0x7b,0xf0]
           vcvtusi2sd %r8, {ru-sae}, %xmm26, %xmm14
 
-// CHECK:  vcvtusi2sdq  %r8, {rd-sae}, %xmm26, %xmm14
+// CHECK:  vcvtusi2sd  %r8, {rd-sae}, %xmm26, %xmm14
 // CHECK:  encoding: [0x62,0x51,0xaf,0x30,0x7b,0xf0]
           vcvtusi2sd %r8, {rd-sae}, %xmm26, %xmm14
 
-// CHECK:  vcvtusi2sdq  %r8, {rz-sae}, %xmm26, %xmm14
+// CHECK:  vcvtusi2sd  %r8, {rz-sae}, %xmm26, %xmm14
 // CHECK:  encoding: [0x62,0x51,0xaf,0x70,0x7b,0xf0]
           vcvtusi2sd %r8, {rz-sae}, %xmm26, %xmm14
 
@@ -9156,63 +9156,63 @@ vpermilpd $0x23, 0x400(%rbx), %zmm2
 // CHECK:  encoding: [0x62,0x71,0xaf,0x00,0x7b,0xb2,0xf8,0xfb,0xff,0xff]
           vcvtusi2sdq -1032(%rdx), %xmm26, %xmm14
 
-// CHECK: vcvtusi2ssl %eax, %xmm26, %xmm5
+// CHECK: vcvtusi2ss %eax, %xmm26, %xmm5
 // CHECK:  encoding: [0x62,0xf1,0x2e,0x00,0x7b,0xe8]
           vcvtusi2ss %eax, %xmm26, %xmm5
 
-// CHECK:  vcvtusi2ssl  %eax, {rn-sae}, %xmm26, %xmm5
+// CHECK:  vcvtusi2ss  %eax, {rn-sae}, %xmm26, %xmm5
 // CHECK:  encoding: [0x62,0xf1,0x2e,0x10,0x7b,0xe8]
           vcvtusi2ss %eax, {rn-sae}, %xmm26, %xmm5
 
-// CHECK:  vcvtusi2ssl  %eax, {ru-sae}, %xmm26, %xmm5
+// CHECK:  vcvtusi2ss  %eax, {ru-sae}, %xmm26, %xmm5
 // CHECK:  encoding: [0x62,0xf1,0x2e,0x50,0x7b,0xe8]
           vcvtusi2ss %eax, {ru-sae}, %xmm26, %xmm5
 
-// CHECK:  vcvtusi2ssl  %eax, {rd-sae}, %xmm26, %xmm5
+// CHECK:  vcvtusi2ss  %eax, {rd-sae}, %xmm26, %xmm5
 // CHECK:  encoding: [0x62,0xf1,0x2e,0x30,0x7b,0xe8]
           vcvtusi2ss %eax, {rd-sae}, %xmm26, %xmm5
 
-// CHECK:  vcvtusi2ssl  %eax, {rz-sae}, %xmm26, %xmm5
+// CHECK:  vcvtusi2ss  %eax, {rz-sae}, %xmm26, %xmm5
 // CHECK:  encoding: [0x62,0xf1,0x2e,0x70,0x7b,0xe8]
           vcvtusi2ss %eax, {rz-sae}, %xmm26, %xmm5
 
-// CHECK:  vcvtusi2ssl  %ebp, %xmm26, %xmm5
+// CHECK:  vcvtusi2ss  %ebp, %xmm26, %xmm5
 // CHECK:  encoding: [0x62,0xf1,0x2e,0x00,0x7b,0xed]
           vcvtusi2ss %ebp, %xmm26, %xmm5
 
-// CHECK:  vcvtusi2ssl  %ebp, {rn-sae}, %xmm26, %xmm5
+// CHECK:  vcvtusi2ss  %ebp, {rn-sae}, %xmm26, %xmm5
 // CHECK:  encoding: [0x62,0xf1,0x2e,0x10,0x7b,0xed]
           vcvtusi2ss %ebp, {rn-sae}, %xmm26, %xmm5
 
-// CHECK:  vcvtusi2ssl  %ebp, {ru-sae}, %xmm26, %xmm5
+// CHECK:  vcvtusi2ss  %ebp, {ru-sae}, %xmm26, %xmm5
 // CHECK:  encoding: [0x62,0xf1,0x2e,0x50,0x7b,0xed]
           vcvtusi2ss %ebp, {ru-sae}, %xmm26, %xmm5
 
-// CHECK:  vcvtusi2ssl  %ebp, {rd-sae}, %xmm26, %xmm5
+// CHECK:  vcvtusi2ss  %ebp, {rd-sae}, %xmm26, %xmm5
 // CHECK:  encoding: [0x62,0xf1,0x2e,0x30,0x7b,0xed]
           vcvtusi2ss %ebp, {rd-sae}, %xmm26, %xmm5
 
-// CHECK:  vcvtusi2ssl  %ebp, {rz-sae}, %xmm26, %xmm5
+// CHECK:  vcvtusi2ss  %ebp, {rz-sae}, %xmm26, %xmm5
 // CHECK:  encoding: [0x62,0xf1,0x2e,0x70,0x7b,0xed]
           vcvtusi2ss %ebp, {rz-sae}, %xmm26, %xmm5
 
-// CHECK:  vcvtusi2ssl  %r13d, %xmm26, %xmm5
+// CHECK:  vcvtusi2ss  %r13d, %xmm26, %xmm5
 // CHECK:  encoding: [0x62,0xd1,0x2e,0x00,0x7b,0xed]
           vcvtusi2ss %r13d, %xmm26, %xmm5
 
-// CHECK:  vcvtusi2ssl  %r13d, {rn-sae}, %xmm26, %xmm5
+// CHECK:  vcvtusi2ss  %r13d, {rn-sae}, %xmm26, %xmm5
 // CHECK:  encoding: [0x62,0xd1,0x2e,0x10,0x7b,0xed]
           vcvtusi2ss %r13d, {rn-sae}, %xmm26, %xmm5
 
-// CHECK:  vcvtusi2ssl  %r13d, {ru-sae}, %xmm26, %xmm5
+// CHECK:  vcvtusi2ss  %r13d, {ru-sae}, %xmm26, %xmm5
 // CHECK:  encoding: [0x62,0xd1,0x2e,0x50,0x7b,0xed]
           vcvtusi2ss %r13d, {ru-sae}, %xmm26, %xmm5
 
-// CHECK:  vcvtusi2ssl  %r13d, {rd-sae}, %xmm26, %xmm5
+// CHECK:  vcvtusi2ss  %r13d, {rd-sae}, %xmm26, %xmm5
 // CHECK:  encoding: [0x62,0xd1,0x2e,0x30,0x7b,0xed]
           vcvtusi2ss %r13d, {rd-sae}, %xmm26, %xmm5
 
-// CHECK:  vcvtusi2ssl  %r13d, {rz-sae}, %xmm26, %xmm5
+// CHECK:  vcvtusi2ss  %r13d, {rz-sae}, %xmm26, %xmm5
 // CHECK:  encoding: [0x62,0xd1,0x2e,0x70,0x7b,0xed]
           vcvtusi2ss %r13d, {rz-sae}, %xmm26, %xmm5
 
@@ -9244,43 +9244,43 @@ vpermilpd $0x23, 0x400(%rbx), %zmm2
 // CHECK:  encoding: [0x62,0xf1,0x2e,0x00,0x7b,0xaa,0xfc,0xfd,0xff,0xff]
           vcvtusi2ss -516(%rdx), %xmm26, %xmm5
 
-// CHECK:  vcvtusi2ssq  %rax, %xmm22, %xmm14
+// CHECK:  vcvtusi2ss  %rax, %xmm22, %xmm14
 // CHECK:  encoding: [0x62,0x71,0xce,0x00,0x7b,0xf0]
           vcvtusi2ss %rax, %xmm22, %xmm14
 
-// CHECK:  vcvtusi2ssq  %rax, {rn-sae}, %xmm22, %xmm14
+// CHECK:  vcvtusi2ss  %rax, {rn-sae}, %xmm22, %xmm14
 // CHECK:  encoding: [0x62,0x71,0xce,0x10,0x7b,0xf0]
           vcvtusi2ss %rax, {rn-sae}, %xmm22, %xmm14
 
-// CHECK:  vcvtusi2ssq  %rax, {ru-sae}, %xmm22, %xmm14
+// CHECK:  vcvtusi2ss  %rax, {ru-sae}, %xmm22, %xmm14
 // CHECK:  encoding: [0x62,0x71,0xce,0x50,0x7b,0xf0]
           vcvtusi2ss %rax, {ru-sae}, %xmm22, %xmm14
 
-// CHECK:  vcvtusi2ssq  %rax, {rd-sae}, %xmm22, %xmm14
+// CHECK:  vcvtusi2ss  %rax, {rd-sae}, %xmm22, %xmm14
 // CHECK:  encoding: [0x62,0x71,0xce,0x30,0x7b,0xf0]
           vcvtusi2ss %rax, {rd-sae}, %xmm22, %xmm14
 
-// CHECK:  vcvtusi2ssq  %rax, {rz-sae}, %xmm22, %xmm14
+// CHECK:  vcvtusi2ss  %rax, {rz-sae}, %xmm22, %xmm14
 // CHECK:  encoding: [0x62,0x71,0xce,0x70,0x7b,0xf0]
           vcvtusi2ss %rax, {rz-sae}, %xmm22, %xmm14
 
-// CHECK:  vcvtusi2ssq  %r8, %xmm22, %xmm14
+// CHECK:  vcvtusi2ss  %r8, %xmm22, %xmm14
 // CHECK:  encoding: [0x62,0x51,0xce,0x00,0x7b,0xf0]
           vcvtusi2ss %r8, %xmm22, %xmm14
 
-// CHECK:  vcvtusi2ssq  %r8, {rn-sae}, %xmm22, %xmm14
+// CHECK:  vcvtusi2ss  %r8, {rn-sae}, %xmm22, %xmm14
 // CHECK:  encoding: [0x62,0x51,0xce,0x10,0x7b,0xf0]
           vcvtusi2ss %r8, {rn-sae}, %xmm22, %xmm14
 
-// CHECK:  vcvtusi2ssq  %r8, {ru-sae}, %xmm22, %xmm14
+// CHECK:  vcvtusi2ss  %r8, {ru-sae}, %xmm22, %xmm14
 // CHECK:  encoding: [0x62,0x51,0xce,0x50,0x7b,0xf0]
           vcvtusi2ss %r8, {ru-sae}, %xmm22, %xmm14
 
-// CHECK:  vcvtusi2ssq  %r8, {rd-sae}, %xmm22, %xmm14
+// CHECK:  vcvtusi2ss  %r8, {rd-sae}, %xmm22, %xmm14
 // CHECK:  encoding: [0x62,0x51,0xce,0x30,0x7b,0xf0]
           vcvtusi2ss %r8, {rd-sae}, %xmm22, %xmm14
 
-// CHECK: vcvtusi2ssq %r8, {rz-sae}, %xmm22, %xmm14
+// CHECK: vcvtusi2ss %r8, {rz-sae}, %xmm22, %xmm14
 // CHECK:  encoding: [0x62,0x51,0xce,0x70,0x7b,0xf0]
           vcvtusi2ss %r8, {rz-sae}, %xmm22, %xmm14
 

Modified: llvm/trunk/test/MC/X86/x86-32-coverage.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/X86/x86-32-coverage.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/MC/X86/x86-32-coverage.s (original)
+++ llvm/trunk/test/MC/X86/x86-32-coverage.s Mon May  6 14:39:51 2019
@@ -5552,7 +5552,7 @@
 // CHECK:  encoding: [0x0f,0x2d,0xdd]
         	cvtps2pi	%xmm5,%mm3
 
-// CHECK: cvtsi2ssl	%ecx, %xmm5
+// CHECK: cvtsi2ss	%ecx, %xmm5
 // CHECK:  encoding: [0xf3,0x0f,0x2a,0xe9]
         	cvtsi2ssl	%ecx,%xmm5
 
@@ -7056,7 +7056,7 @@
 // CHECK:  encoding: [0x66,0x0f,0x2a,0xeb]
         	cvtpi2pd	%mm3,%xmm5
 
-// CHECK: cvtsi2sdl	%ecx, %xmm5
+// CHECK: cvtsi2sd	%ecx, %xmm5
 // CHECK:  encoding: [0xf2,0x0f,0x2a,0xe9]
         	cvtsi2sdl	%ecx,%xmm5
 

Modified: llvm/trunk/test/MC/X86/x86_64-avx-encoding.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/X86/x86_64-avx-encoding.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/MC/X86/x86_64-avx-encoding.s (original)
+++ llvm/trunk/test/MC/X86/x86_64-avx-encoding.s Mon May  6 14:39:51 2019
@@ -3880,7 +3880,7 @@ vdivpd  -4(%rcx,%rbx,8), %xmm10, %xmm11
 // CHECK: encoding: [0xc4,0x61,0xfa,0x2d,0x01]
           vcvtss2si  (%rcx), %r8
 
-// CHECK: vcvtsi2sdl  %r8d, %xmm8, %xmm15
+// CHECK: vcvtsi2sd  %r8d, %xmm8, %xmm15
 // CHECK: encoding: [0xc4,0x41,0x3b,0x2a,0xf8]
           vcvtsi2sdl  %r8d, %xmm8, %xmm15
 
@@ -3888,7 +3888,7 @@ vdivpd  -4(%rcx,%rbx,8), %xmm10, %xmm11
 // CHECK: encoding: [0xc5,0x3b,0x2a,0x7d,0x00]
           vcvtsi2sdl  (%rbp), %xmm8, %xmm15
 
-// CHECK: vcvtsi2sdq  %rcx, %xmm4, %xmm6
+// CHECK: vcvtsi2sd  %rcx, %xmm4, %xmm6
 // CHECK: encoding: [0xc4,0xe1,0xdb,0x2a,0xf1]
           vcvtsi2sdq  %rcx, %xmm4, %xmm6
 
@@ -3896,7 +3896,7 @@ vdivpd  -4(%rcx,%rbx,8), %xmm10, %xmm11
 // CHECK: encoding: [0xc4,0xe1,0xdb,0x2a,0x31]
           vcvtsi2sdq  (%rcx), %xmm4, %xmm6
 
-// CHECK: vcvtsi2ssq  %rcx, %xmm4, %xmm6
+// CHECK: vcvtsi2ss  %rcx, %xmm4, %xmm6
 // CHECK: encoding: [0xc4,0xe1,0xda,0x2a,0xf1]
           vcvtsi2ssq  %rcx, %xmm4, %xmm6
 

Modified: llvm/trunk/test/tools/llvm-mca/X86/Atom/resources-sse1.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/Atom/resources-sse1.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/Atom/resources-sse1.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/Atom/resources-sse1.s Mon May  6 14:39:51 2019
@@ -212,8 +212,8 @@ xorps       (%rax), %xmm2
 # CHECK-NEXT:  1      5     5.00    *                   cvtpi2ps	(%rax), %xmm2
 # CHECK-NEXT:  1      5     5.00                        cvtps2pi	%xmm0, %mm2
 # CHECK-NEXT:  1      5     5.00    *                   cvtps2pi	(%rax), %mm2
-# CHECK-NEXT:  1      6     3.00                        cvtsi2ssl	%ecx, %xmm2
-# CHECK-NEXT:  1      6     3.00                        cvtsi2ssq	%rcx, %xmm2
+# CHECK-NEXT:  1      6     3.00                        cvtsi2ss	%ecx, %xmm2
+# CHECK-NEXT:  1      6     3.00                        cvtsi2ss	%rcx, %xmm2
 # CHECK-NEXT:  1      7     3.50    *                   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  1      7     3.50    *                   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  1      8     4.00                        cvtss2si	%xmm0, %ecx
@@ -347,8 +347,8 @@ xorps       (%rax), %xmm2
 # CHECK-NEXT:  -     5.00   cvtpi2ps	(%rax), %xmm2
 # CHECK-NEXT:  -     5.00   cvtps2pi	%xmm0, %mm2
 # CHECK-NEXT: 5.00   5.00   cvtps2pi	(%rax), %mm2
-# CHECK-NEXT: 3.00   3.00   cvtsi2ssl	%ecx, %xmm2
-# CHECK-NEXT: 3.00   3.00   cvtsi2ssq	%rcx, %xmm2
+# CHECK-NEXT: 3.00   3.00   cvtsi2ss	%ecx, %xmm2
+# CHECK-NEXT: 3.00   3.00   cvtsi2ss	%rcx, %xmm2
 # CHECK-NEXT: 3.50   3.50   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT: 3.50   3.50   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT: 4.00   4.00   cvtss2si	%xmm0, %ecx

Modified: llvm/trunk/test/tools/llvm-mca/X86/Atom/resources-sse2.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/Atom/resources-sse2.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/Atom/resources-sse2.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/Atom/resources-sse2.s Mon May  6 14:39:51 2019
@@ -444,8 +444,8 @@ xorpd       (%rax), %xmm2
 # CHECK-NEXT:  1      9     4.50    *                   cvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  1      6     3.00                        cvtsd2ss	%xmm0, %xmm2
 # CHECK-NEXT:  1      7     3.50    *                   cvtsd2ss	(%rax), %xmm2
-# CHECK-NEXT:  1      6     3.00                        cvtsi2sdl	%ecx, %xmm2
-# CHECK-NEXT:  1      6     3.00                        cvtsi2sdq	%rcx, %xmm2
+# CHECK-NEXT:  1      6     3.00                        cvtsi2sd	%ecx, %xmm2
+# CHECK-NEXT:  1      6     3.00                        cvtsi2sd	%rcx, %xmm2
 # CHECK-NEXT:  1      7     3.50    *                   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  1      7     3.50    *                   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  1      6     3.00                        cvtss2sd	%xmm0, %xmm2
@@ -722,8 +722,8 @@ xorpd       (%rax), %xmm2
 # CHECK-NEXT: 4.50   4.50   cvtsd2si	(%rax), %rcx
 # CHECK-NEXT: 3.00   3.00   cvtsd2ss	%xmm0, %xmm2
 # CHECK-NEXT: 3.50   3.50   cvtsd2ss	(%rax), %xmm2
-# CHECK-NEXT: 3.00   3.00   cvtsi2sdl	%ecx, %xmm2
-# CHECK-NEXT: 3.00   3.00   cvtsi2sdq	%rcx, %xmm2
+# CHECK-NEXT: 3.00   3.00   cvtsi2sd	%ecx, %xmm2
+# CHECK-NEXT: 3.00   3.00   cvtsi2sd	%rcx, %xmm2
 # CHECK-NEXT: 3.50   3.50   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT: 3.50   3.50   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT: 3.00   3.00   cvtss2sd	%xmm0, %xmm2

Modified: llvm/trunk/test/tools/llvm-mca/X86/BdVer2/int-to-fpu-forwarding-2.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/BdVer2/int-to-fpu-forwarding-2.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/BdVer2/int-to-fpu-forwarding-2.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/BdVer2/int-to-fpu-forwarding-2.s Mon May  6 14:39:51 2019
@@ -46,7 +46,7 @@ movq %rcx, %xmm0
 # CHECK-NEXT: [6]: HasSideEffects (U)
 
 # CHECK:      [1]    [2]    [3]    [4]    [5]    [6]    Instructions:
-# CHECK-NEXT:  2      14    1.00                        vcvtsi2ssl	%ecx, %xmm0, %xmm0
+# CHECK-NEXT:  2      14    1.00                        vcvtsi2ss	%ecx, %xmm0, %xmm0
 
 # CHECK:      Resources:
 # CHECK-NEXT: [0.0] - PdAGLU01
@@ -79,7 +79,7 @@ movq %rcx, %xmm0
 
 # CHECK:      Resource pressure by instruction:
 # CHECK-NEXT: [0.0]  [0.1]  [1]    [2]    [3]    [4]    [5]    [6]    [7.0]  [7.1]  [8.0]  [8.1]  [9]    [10]   [11]   [12]   [13]   [14]   [15]   [16.0] [16.1] [17]   [18]   Instructions:
-# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     vcvtsi2ssl	%ecx, %xmm0, %xmm0
+# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     vcvtsi2ss	%ecx, %xmm0, %xmm0
 
 # CHECK:      [1] Code Region
 
@@ -102,7 +102,7 @@ movq %rcx, %xmm0
 # CHECK-NEXT: [6]: HasSideEffects (U)
 
 # CHECK:      [1]    [2]    [3]    [4]    [5]    [6]    Instructions:
-# CHECK-NEXT:  2      14    1.00                        vcvtsi2sdl	%ecx, %xmm0, %xmm0
+# CHECK-NEXT:  2      14    1.00                        vcvtsi2sd	%ecx, %xmm0, %xmm0
 
 # CHECK:      Resources:
 # CHECK-NEXT: [0.0] - PdAGLU01
@@ -135,7 +135,7 @@ movq %rcx, %xmm0
 
 # CHECK:      Resource pressure by instruction:
 # CHECK-NEXT: [0.0]  [0.1]  [1]    [2]    [3]    [4]    [5]    [6]    [7.0]  [7.1]  [8.0]  [8.1]  [9]    [10]   [11]   [12]   [13]   [14]   [15]   [16.0] [16.1] [17]   [18]   Instructions:
-# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     vcvtsi2sdl	%ecx, %xmm0, %xmm0
+# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     vcvtsi2sd	%ecx, %xmm0, %xmm0
 
 # CHECK:      [2] Code Region
 
@@ -158,7 +158,7 @@ movq %rcx, %xmm0
 # CHECK-NEXT: [6]: HasSideEffects (U)
 
 # CHECK:      [1]    [2]    [3]    [4]    [5]    [6]    Instructions:
-# CHECK-NEXT:  2      13    1.00                        cvtsi2ssl	%ecx, %xmm0
+# CHECK-NEXT:  2      13    1.00                        cvtsi2ss	%ecx, %xmm0
 
 # CHECK:      Resources:
 # CHECK-NEXT: [0.0] - PdAGLU01
@@ -191,7 +191,7 @@ movq %rcx, %xmm0
 
 # CHECK:      Resource pressure by instruction:
 # CHECK-NEXT: [0.0]  [0.1]  [1]    [2]    [3]    [4]    [5]    [6]    [7.0]  [7.1]  [8.0]  [8.1]  [9]    [10]   [11]   [12]   [13]   [14]   [15]   [16.0] [16.1] [17]   [18]   Instructions:
-# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     cvtsi2ssl	%ecx, %xmm0
+# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     cvtsi2ss	%ecx, %xmm0
 
 # CHECK:      [3] Code Region
 
@@ -214,7 +214,7 @@ movq %rcx, %xmm0
 # CHECK-NEXT: [6]: HasSideEffects (U)
 
 # CHECK:      [1]    [2]    [3]    [4]    [5]    [6]    Instructions:
-# CHECK-NEXT:  2      13    1.00                        cvtsi2sdl	%ecx, %xmm0
+# CHECK-NEXT:  2      13    1.00                        cvtsi2sd	%ecx, %xmm0
 
 # CHECK:      Resources:
 # CHECK-NEXT: [0.0] - PdAGLU01
@@ -247,7 +247,7 @@ movq %rcx, %xmm0
 
 # CHECK:      Resource pressure by instruction:
 # CHECK-NEXT: [0.0]  [0.1]  [1]    [2]    [3]    [4]    [5]    [6]    [7.0]  [7.1]  [8.0]  [8.1]  [9]    [10]   [11]   [12]   [13]   [14]   [15]   [16.0] [16.1] [17]   [18]   Instructions:
-# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     cvtsi2sdl	%ecx, %xmm0
+# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     cvtsi2sd	%ecx, %xmm0
 
 # CHECK:      [4] Code Region
 

Modified: llvm/trunk/test/tools/llvm-mca/X86/BdVer2/resources-avx1.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/BdVer2/resources-avx1.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/BdVer2/resources-avx1.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/BdVer2/resources-avx1.s Mon May  6 14:39:51 2019
@@ -1144,12 +1144,12 @@ vzeroupper
 # CHECK-NEXT:  2      18    1.00    *                   vcvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  1      4     1.00                        vcvtsd2ss	%xmm0, %xmm1, %xmm2
 # CHECK-NEXT:  1      9     1.00    *                   vcvtsd2ss	(%rax), %xmm1, %xmm2
-# CHECK-NEXT:  2      14    1.00                        vcvtsi2sdl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  2      14    1.00                        vcvtsi2sdq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  2      14    1.00                        vcvtsi2sd	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  2      14    1.00                        vcvtsi2sd	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   vcvtsi2sdl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   vcvtsi2sdq	(%rax), %xmm0, %xmm2
-# CHECK-NEXT:  2      14    1.00                        vcvtsi2ssl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  2      14    1.00                        vcvtsi2ssq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  2      14    1.00                        vcvtsi2ss	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  2      14    1.00                        vcvtsi2ss	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   vcvtsi2ssl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   vcvtsi2ssq	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  1      4     1.00                        vcvtss2sd	%xmm0, %xmm1, %xmm2
@@ -1867,12 +1867,12 @@ vzeroupper
 # CHECK-NEXT: 0.50   0.50    -      -      -     1.00    -      -     0.50   0.50    -      -      -     1.00    -     1.00    -      -      -     0.50   0.50    -      -     vcvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     vcvtsd2ss	%xmm0, %xmm1, %xmm2
 # CHECK-NEXT: 0.50   0.50    -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -     0.50   0.50    -      -     vcvtsd2ss	(%rax), %xmm1, %xmm2
-# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     vcvtsi2sdl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     vcvtsi2sdq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     vcvtsi2sd	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     vcvtsi2sd	%rcx, %xmm0, %xmm2
 # CHECK-NEXT: 0.50   0.50    -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -     0.50   0.50    -      -     vcvtsi2sdl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT: 0.50   0.50    -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -     0.50   0.50    -      -     vcvtsi2sdq	(%rax), %xmm0, %xmm2
-# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     vcvtsi2ssl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     vcvtsi2ssq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     vcvtsi2ss	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     vcvtsi2ss	%rcx, %xmm0, %xmm2
 # CHECK-NEXT: 0.50   0.50    -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -     0.50   0.50    -      -     vcvtsi2ssl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT: 0.50   0.50    -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -     0.50   0.50    -      -     vcvtsi2ssq	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     vcvtss2sd	%xmm0, %xmm1, %xmm2

Modified: llvm/trunk/test/tools/llvm-mca/X86/BdVer2/resources-sse1.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/BdVer2/resources-sse1.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/BdVer2/resources-sse1.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/BdVer2/resources-sse1.s Mon May  6 14:39:51 2019
@@ -212,8 +212,8 @@ xorps       (%rax), %xmm2
 # CHECK-NEXT:  1      9     1.00    *                   cvtpi2ps	(%rax), %xmm2
 # CHECK-NEXT:  1      4     1.00                        cvtps2pi	%xmm0, %mm2
 # CHECK-NEXT:  1      9     1.00    *                   cvtps2pi	(%rax), %mm2
-# CHECK-NEXT:  2      13    1.00                        cvtsi2ssl	%ecx, %xmm2
-# CHECK-NEXT:  2      13    1.00                        cvtsi2ssq	%rcx, %xmm2
+# CHECK-NEXT:  2      13    1.00                        cvtsi2ss	%ecx, %xmm2
+# CHECK-NEXT:  2      13    1.00                        cvtsi2ss	%rcx, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  2      13    1.00                        cvtss2si	%xmm0, %ecx
@@ -368,8 +368,8 @@ xorps       (%rax), %xmm2
 # CHECK-NEXT: 0.50   0.50    -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -     0.50   0.50    -      -     cvtpi2ps	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     cvtps2pi	%xmm0, %mm2
 # CHECK-NEXT: 0.50   0.50    -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -     0.50   0.50    -      -     cvtps2pi	(%rax), %mm2
-# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     cvtsi2ssl	%ecx, %xmm2
-# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     cvtsi2ssq	%rcx, %xmm2
+# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     cvtsi2ss	%ecx, %xmm2
+# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     cvtsi2ss	%rcx, %xmm2
 # CHECK-NEXT: 0.50   0.50    -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -     0.50   0.50    -      -     cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT: 0.50   0.50    -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -     0.50   0.50    -      -     cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -      -      -     1.00    -      -     0.50   0.50    -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     cvtss2si	%xmm0, %ecx

Modified: llvm/trunk/test/tools/llvm-mca/X86/BdVer2/resources-sse2.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/BdVer2/resources-sse2.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/BdVer2/resources-sse2.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/BdVer2/resources-sse2.s Mon May  6 14:39:51 2019
@@ -444,8 +444,8 @@ xorpd       (%rax), %xmm2
 # CHECK-NEXT:  2      18    1.00    *                   cvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  1      4     1.00                        cvtsd2ss	%xmm0, %xmm2
 # CHECK-NEXT:  1      9     1.00    *                   cvtsd2ss	(%rax), %xmm2
-# CHECK-NEXT:  2      13    1.00                        cvtsi2sdl	%ecx, %xmm2
-# CHECK-NEXT:  2      13    1.00                        cvtsi2sdq	%rcx, %xmm2
+# CHECK-NEXT:  2      13    1.00                        cvtsi2sd	%ecx, %xmm2
+# CHECK-NEXT:  2      13    1.00                        cvtsi2sd	%rcx, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  1      4     1.00                        cvtss2sd	%xmm0, %xmm2
@@ -743,8 +743,8 @@ xorpd       (%rax), %xmm2
 # CHECK-NEXT: 0.50   0.50    -      -      -     1.00    -      -     0.50   0.50    -      -      -     1.00    -     1.00    -      -      -     0.50   0.50    -      -     cvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     cvtsd2ss	%xmm0, %xmm2
 # CHECK-NEXT: 0.50   0.50    -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -     0.50   0.50    -      -     cvtsd2ss	(%rax), %xmm2
-# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     cvtsi2sdl	%ecx, %xmm2
-# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     cvtsi2sdq	%rcx, %xmm2
+# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     cvtsi2sd	%ecx, %xmm2
+# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     cvtsi2sd	%rcx, %xmm2
 # CHECK-NEXT: 0.50   0.50    -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -     0.50   0.50    -      -     cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT: 0.50   0.50    -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -     0.50   0.50    -      -     cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -      -      -      -     1.00    -     1.00    -      -      -      -      -      -      -     cvtss2sd	%xmm0, %xmm2

Modified: llvm/trunk/test/tools/llvm-mca/X86/Broadwell/resources-avx1.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/Broadwell/resources-avx1.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/Broadwell/resources-avx1.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/Broadwell/resources-avx1.s Mon May  6 14:39:51 2019
@@ -1144,12 +1144,12 @@ vzeroupper
 # CHECK-NEXT:  3      9     1.00    *                   vcvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  2      4     1.00                        vcvtsd2ss	%xmm0, %xmm1, %xmm2
 # CHECK-NEXT:  3      9     1.00    *                   vcvtsd2ss	(%rax), %xmm1, %xmm2
-# CHECK-NEXT:  2      4     1.00                        vcvtsi2sdl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  2      4     1.00                        vcvtsi2sdq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  2      4     1.00                        vcvtsi2sd	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  2      4     1.00                        vcvtsi2sd	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   vcvtsi2sdl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   vcvtsi2sdq	(%rax), %xmm0, %xmm2
-# CHECK-NEXT:  2      4     1.00                        vcvtsi2ssl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  3      5     2.00                        vcvtsi2ssq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  2      4     1.00                        vcvtsi2ss	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  3      5     2.00                        vcvtsi2ss	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   vcvtsi2ssl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   vcvtsi2ssq	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  2      2     1.00                        vcvtss2sd	%xmm0, %xmm1, %xmm2
@@ -1854,12 +1854,12 @@ vzeroupper
 # CHECK-NEXT:  -      -     1.00   1.00   0.50   0.50    -      -      -      -     vcvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     vcvtsd2ss	%xmm0, %xmm1, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -     1.00    -      -     vcvtsd2ss	(%rax), %xmm1, %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     vcvtsi2sdl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     vcvtsi2sdq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     vcvtsi2sd	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     vcvtsi2sd	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     vcvtsi2sdl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     vcvtsi2sdq	(%rax), %xmm0, %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     vcvtsi2ssl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -      -      -     2.00    -      -     vcvtsi2ssq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     vcvtsi2ss	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -      -      -     2.00    -      -     vcvtsi2ss	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     vcvtsi2ssl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     vcvtsi2ssq	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  -      -     1.00    -      -      -      -     1.00    -      -     vcvtss2sd	%xmm0, %xmm1, %xmm2

Modified: llvm/trunk/test/tools/llvm-mca/X86/Broadwell/resources-sse1.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/Broadwell/resources-sse1.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/Broadwell/resources-sse1.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/Broadwell/resources-sse1.s Mon May  6 14:39:51 2019
@@ -212,8 +212,8 @@ xorps       (%rax), %xmm2
 # CHECK-NEXT:  2      8     1.00    *                   cvtpi2ps	(%rax), %xmm2
 # CHECK-NEXT:  2      4     1.00                        cvtps2pi	%xmm0, %mm2
 # CHECK-NEXT:  2      8     1.00    *                   cvtps2pi	(%rax), %mm2
-# CHECK-NEXT:  2      4     1.00                        cvtsi2ssl	%ecx, %xmm2
-# CHECK-NEXT:  3      5     2.00                        cvtsi2ssq	%rcx, %xmm2
+# CHECK-NEXT:  2      4     1.00                        cvtsi2ss	%ecx, %xmm2
+# CHECK-NEXT:  3      5     2.00                        cvtsi2ss	%rcx, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  2      4     1.00                        cvtss2si	%xmm0, %ecx
@@ -355,8 +355,8 @@ xorps       (%rax), %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     cvtpi2ps	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     cvtps2pi	%xmm0, %mm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     cvtps2pi	(%rax), %mm2
-# CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     cvtsi2ssl	%ecx, %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -      -      -     2.00    -      -     cvtsi2ssq	%rcx, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     cvtsi2ss	%ecx, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -      -      -     2.00    -      -     cvtsi2ss	%rcx, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  -      -     1.00   1.00    -      -      -      -      -      -     cvtss2si	%xmm0, %ecx

Modified: llvm/trunk/test/tools/llvm-mca/X86/Broadwell/resources-sse2.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/Broadwell/resources-sse2.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/Broadwell/resources-sse2.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/Broadwell/resources-sse2.s Mon May  6 14:39:51 2019
@@ -444,8 +444,8 @@ xorpd       (%rax), %xmm2
 # CHECK-NEXT:  3      9     1.00    *                   cvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  2      4     1.00                        cvtsd2ss	%xmm0, %xmm2
 # CHECK-NEXT:  3      9     1.00    *                   cvtsd2ss	(%rax), %xmm2
-# CHECK-NEXT:  2      4     1.00                        cvtsi2sdl	%ecx, %xmm2
-# CHECK-NEXT:  2      4     1.00                        cvtsi2sdq	%rcx, %xmm2
+# CHECK-NEXT:  2      4     1.00                        cvtsi2sd	%ecx, %xmm2
+# CHECK-NEXT:  2      4     1.00                        cvtsi2sd	%rcx, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  2      2     1.00                        cvtss2sd	%xmm0, %xmm2
@@ -730,8 +730,8 @@ xorpd       (%rax), %xmm2
 # CHECK-NEXT:  -      -     1.00   1.00   0.50   0.50    -      -      -      -     cvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     cvtsd2ss	%xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -     1.00    -      -     cvtsd2ss	(%rax), %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     cvtsi2sdl	%ecx, %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     cvtsi2sdq	%rcx, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     cvtsi2sd	%ecx, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     cvtsi2sd	%rcx, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  -      -     1.00    -      -      -      -     1.00    -      -     cvtss2sd	%xmm0, %xmm2

Modified: llvm/trunk/test/tools/llvm-mca/X86/BtVer2/int-to-fpu-forwarding-2.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/BtVer2/int-to-fpu-forwarding-2.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/BtVer2/int-to-fpu-forwarding-2.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/BtVer2/int-to-fpu-forwarding-2.s Mon May  6 14:39:51 2019
@@ -46,7 +46,7 @@ movq %rcx, %xmm0
 # CHECK-NEXT: [6]: HasSideEffects (U)
 
 # CHECK:      [1]    [2]    [3]    [4]    [5]    [6]    Instructions:
-# CHECK-NEXT:  2      10    1.00                        vcvtsi2ssl	%ecx, %xmm0, %xmm0
+# CHECK-NEXT:  2      10    1.00                        vcvtsi2ss	%ecx, %xmm0, %xmm0
 
 # CHECK:      Resources:
 # CHECK-NEXT: [0]   - JALU0
@@ -70,7 +70,7 @@ movq %rcx, %xmm0
 
 # CHECK:      Resource pressure by instruction:
 # CHECK-NEXT: [0]    [1]    [2]    [3]    [4]    [5]    [6]    [7]    [8]    [9]    [10]   [11]   [12]   [13]   Instructions:
-# CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     1.00    -      -      -     vcvtsi2ssl	%ecx, %xmm0, %xmm0
+# CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     1.00    -      -      -     vcvtsi2ss	%ecx, %xmm0, %xmm0
 
 # CHECK:      [1] Code Region
 
@@ -93,7 +93,7 @@ movq %rcx, %xmm0
 # CHECK-NEXT: [6]: HasSideEffects (U)
 
 # CHECK:      [1]    [2]    [3]    [4]    [5]    [6]    Instructions:
-# CHECK-NEXT:  2      10    1.00                        vcvtsi2sdl	%ecx, %xmm0, %xmm0
+# CHECK-NEXT:  2      10    1.00                        vcvtsi2sd	%ecx, %xmm0, %xmm0
 
 # CHECK:      Resources:
 # CHECK-NEXT: [0]   - JALU0
@@ -117,7 +117,7 @@ movq %rcx, %xmm0
 
 # CHECK:      Resource pressure by instruction:
 # CHECK-NEXT: [0]    [1]    [2]    [3]    [4]    [5]    [6]    [7]    [8]    [9]    [10]   [11]   [12]   [13]   Instructions:
-# CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     1.00    -      -      -     vcvtsi2sdl	%ecx, %xmm0, %xmm0
+# CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     1.00    -      -      -     vcvtsi2sd	%ecx, %xmm0, %xmm0
 
 # CHECK:      [2] Code Region
 
@@ -140,7 +140,7 @@ movq %rcx, %xmm0
 # CHECK-NEXT: [6]: HasSideEffects (U)
 
 # CHECK:      [1]    [2]    [3]    [4]    [5]    [6]    Instructions:
-# CHECK-NEXT:  2      10    1.00                        cvtsi2ssl	%ecx, %xmm0
+# CHECK-NEXT:  2      10    1.00                        cvtsi2ss	%ecx, %xmm0
 
 # CHECK:      Resources:
 # CHECK-NEXT: [0]   - JALU0
@@ -164,7 +164,7 @@ movq %rcx, %xmm0
 
 # CHECK:      Resource pressure by instruction:
 # CHECK-NEXT: [0]    [1]    [2]    [3]    [4]    [5]    [6]    [7]    [8]    [9]    [10]   [11]   [12]   [13]   Instructions:
-# CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     1.00    -      -      -     cvtsi2ssl	%ecx, %xmm0
+# CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     1.00    -      -      -     cvtsi2ss	%ecx, %xmm0
 
 # CHECK:      [3] Code Region
 
@@ -187,7 +187,7 @@ movq %rcx, %xmm0
 # CHECK-NEXT: [6]: HasSideEffects (U)
 
 # CHECK:      [1]    [2]    [3]    [4]    [5]    [6]    Instructions:
-# CHECK-NEXT:  2      10    1.00                        cvtsi2sdl	%ecx, %xmm0
+# CHECK-NEXT:  2      10    1.00                        cvtsi2sd	%ecx, %xmm0
 
 # CHECK:      Resources:
 # CHECK-NEXT: [0]   - JALU0
@@ -211,7 +211,7 @@ movq %rcx, %xmm0
 
 # CHECK:      Resource pressure by instruction:
 # CHECK-NEXT: [0]    [1]    [2]    [3]    [4]    [5]    [6]    [7]    [8]    [9]    [10]   [11]   [12]   [13]   Instructions:
-# CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     1.00    -      -      -     cvtsi2sdl	%ecx, %xmm0
+# CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     1.00    -      -      -     cvtsi2sd	%ecx, %xmm0
 
 # CHECK:      [4] Code Region
 

Modified: llvm/trunk/test/tools/llvm-mca/X86/BtVer2/resources-avx1.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/BtVer2/resources-avx1.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/BtVer2/resources-avx1.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/BtVer2/resources-avx1.s Mon May  6 14:39:51 2019
@@ -1144,12 +1144,12 @@ vzeroupper
 # CHECK-NEXT:  2      12    1.00    *                   vcvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  2      7     2.00                        vcvtsd2ss	%xmm0, %xmm1, %xmm2
 # CHECK-NEXT:  2      12    2.00    *                   vcvtsd2ss	(%rax), %xmm1, %xmm2
-# CHECK-NEXT:  2      10    1.00                        vcvtsi2sdl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  2      10    1.00                        vcvtsi2sdq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  2      10    1.00                        vcvtsi2sd	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  2      10    1.00                        vcvtsi2sd	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  1      9     1.00    *                   vcvtsi2sdl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  1      9     1.00    *                   vcvtsi2sdq	(%rax), %xmm0, %xmm2
-# CHECK-NEXT:  2      10    1.00                        vcvtsi2ssl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  2      10    1.00                        vcvtsi2ssq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  2      10    1.00                        vcvtsi2ss	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  2      10    1.00                        vcvtsi2ss	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  1      9     1.00    *                   vcvtsi2ssl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  1      9     1.00    *                   vcvtsi2ssq	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  2      7     2.00                        vcvtss2sd	%xmm0, %xmm1, %xmm2
@@ -1858,12 +1858,12 @@ vzeroupper
 # CHECK-NEXT: 1.00    -      -     1.00    -     1.00   1.00   1.00    -      -     1.00    -      -      -     vcvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     2.00    -      -      -     vcvtsd2ss	%xmm0, %xmm1, %xmm2
 # CHECK-NEXT:  -      -      -      -      -      -     1.00   1.00    -      -     2.00    -      -      -     vcvtsd2ss	(%rax), %xmm1, %xmm2
-# CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     1.00    -      -      -     vcvtsi2sdl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     1.00    -      -      -     vcvtsi2sdq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     1.00    -      -      -     vcvtsi2sd	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     1.00    -      -      -     vcvtsi2sd	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -      -      -      -     1.00   1.00    -      -     1.00    -      -      -     vcvtsi2sdl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -      -      -      -     1.00   1.00    -      -     1.00    -      -      -     vcvtsi2sdq	(%rax), %xmm0, %xmm2
-# CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     1.00    -      -      -     vcvtsi2ssl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     1.00    -      -      -     vcvtsi2ssq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     1.00    -      -      -     vcvtsi2ss	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     1.00    -      -      -     vcvtsi2ss	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -      -      -      -     1.00   1.00    -      -     1.00    -      -      -     vcvtsi2ssl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -      -      -      -     1.00   1.00    -      -     1.00    -      -      -     vcvtsi2ssq	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     2.00    -      -      -     vcvtss2sd	%xmm0, %xmm1, %xmm2

Modified: llvm/trunk/test/tools/llvm-mca/X86/BtVer2/resources-sse1.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/BtVer2/resources-sse1.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/BtVer2/resources-sse1.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/BtVer2/resources-sse1.s Mon May  6 14:39:51 2019
@@ -212,8 +212,8 @@ xorps       (%rax), %xmm2
 # CHECK-NEXT:  1      8     1.00    *                   cvtpi2ps	(%rax), %xmm2
 # CHECK-NEXT:  1      3     1.00                        cvtps2pi	%xmm0, %mm2
 # CHECK-NEXT:  1      8     1.00    *                   cvtps2pi	(%rax), %mm2
-# CHECK-NEXT:  2      10    1.00                        cvtsi2ssl	%ecx, %xmm2
-# CHECK-NEXT:  2      10    1.00                        cvtsi2ssq	%rcx, %xmm2
+# CHECK-NEXT:  2      10    1.00                        cvtsi2ss	%ecx, %xmm2
+# CHECK-NEXT:  2      10    1.00                        cvtsi2ss	%rcx, %xmm2
 # CHECK-NEXT:  1      9     1.00    *                   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  1      9     1.00    *                   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  2      7     1.00                        cvtss2si	%xmm0, %ecx
@@ -359,8 +359,8 @@ xorps       (%rax), %xmm2
 # CHECK-NEXT:  -      -      -      -      -      -     1.00   1.00    -      -     1.00    -      -      -     cvtpi2ps	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     1.00    -      -      -     cvtps2pi	%xmm0, %mm2
 # CHECK-NEXT:  -      -      -      -      -      -     1.00   1.00    -      -     1.00    -      -      -     cvtps2pi	(%rax), %mm2
-# CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     1.00    -      -      -     cvtsi2ssl	%ecx, %xmm2
-# CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     1.00    -      -      -     cvtsi2ssq	%rcx, %xmm2
+# CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     1.00    -      -      -     cvtsi2ss	%ecx, %xmm2
+# CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     1.00    -      -      -     cvtsi2ss	%rcx, %xmm2
 # CHECK-NEXT:  -      -      -      -      -      -     1.00   1.00    -      -     1.00    -      -      -     cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -      -      -      -     1.00   1.00    -      -     1.00    -      -      -     cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT: 1.00    -      -     1.00    -     1.00   1.00    -      -      -     1.00    -      -      -     cvtss2si	%xmm0, %ecx

Modified: llvm/trunk/test/tools/llvm-mca/X86/BtVer2/resources-sse2.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/BtVer2/resources-sse2.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/BtVer2/resources-sse2.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/BtVer2/resources-sse2.s Mon May  6 14:39:51 2019
@@ -444,8 +444,8 @@ xorpd       (%rax), %xmm2
 # CHECK-NEXT:  2      12    1.00    *                   cvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  2      7     2.00                        cvtsd2ss	%xmm0, %xmm2
 # CHECK-NEXT:  2      12    2.00    *                   cvtsd2ss	(%rax), %xmm2
-# CHECK-NEXT:  2      10    1.00                        cvtsi2sdl	%ecx, %xmm2
-# CHECK-NEXT:  2      10    1.00                        cvtsi2sdq	%rcx, %xmm2
+# CHECK-NEXT:  2      10    1.00                        cvtsi2sd	%ecx, %xmm2
+# CHECK-NEXT:  2      10    1.00                        cvtsi2sd	%rcx, %xmm2
 # CHECK-NEXT:  1      9     1.00    *                   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  1      9     1.00    *                   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  2      7     2.00                        cvtss2sd	%xmm0, %xmm2
@@ -734,8 +734,8 @@ xorpd       (%rax), %xmm2
 # CHECK-NEXT: 1.00    -      -     1.00    -     1.00   1.00   1.00    -      -     1.00    -      -      -     cvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     2.00    -      -      -     cvtsd2ss	%xmm0, %xmm2
 # CHECK-NEXT:  -      -      -      -      -      -     1.00   1.00    -      -     2.00    -      -      -     cvtsd2ss	(%rax), %xmm2
-# CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     1.00    -      -      -     cvtsi2sdl	%ecx, %xmm2
-# CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     1.00    -      -      -     cvtsi2sdq	%rcx, %xmm2
+# CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     1.00    -      -      -     cvtsi2sd	%ecx, %xmm2
+# CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     1.00    -      -      -     cvtsi2sd	%rcx, %xmm2
 # CHECK-NEXT:  -      -      -      -      -      -     1.00   1.00    -      -     1.00    -      -      -     cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -      -      -      -     1.00   1.00    -      -     1.00    -      -      -     cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -      -      -      -     1.00    -      -      -     2.00    -      -      -     cvtss2sd	%xmm0, %xmm2

Modified: llvm/trunk/test/tools/llvm-mca/X86/Generic/resources-avx1.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/Generic/resources-avx1.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/Generic/resources-avx1.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/Generic/resources-avx1.s Mon May  6 14:39:51 2019
@@ -1144,12 +1144,12 @@ vzeroupper
 # CHECK-NEXT:  3      10    1.00    *                   vcvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  2      4     1.00                        vcvtsd2ss	%xmm0, %xmm1, %xmm2
 # CHECK-NEXT:  3      10    1.00    *                   vcvtsd2ss	(%rax), %xmm1, %xmm2
-# CHECK-NEXT:  2      4     1.00                        vcvtsi2sdl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  2      4     1.00                        vcvtsi2sdq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  2      4     1.00                        vcvtsi2sd	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  2      4     1.00                        vcvtsi2sd	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   vcvtsi2sdl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   vcvtsi2sdq	(%rax), %xmm0, %xmm2
-# CHECK-NEXT:  3      5     2.00                        vcvtsi2ssl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  3      5     2.00                        vcvtsi2ssq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  3      5     2.00                        vcvtsi2ss	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  3      5     2.00                        vcvtsi2ss	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  3      10    1.00    *                   vcvtsi2ssl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  3      10    1.00    *                   vcvtsi2ssq	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  1      1     1.00                        vcvtss2sd	%xmm0, %xmm1, %xmm2
@@ -1852,12 +1852,12 @@ vzeroupper
 # CHECK-NEXT:  -      -     1.00   1.00    -      -     0.50   0.50   vcvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  -      -      -     1.00    -     1.00    -      -     vcvtsd2ss	%xmm0, %xmm1, %xmm2
 # CHECK-NEXT:  -      -      -     1.00    -     1.00   0.50   0.50   vcvtsd2ss	(%rax), %xmm1, %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -     1.00    -      -     vcvtsi2sdl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -     1.00    -      -     vcvtsi2sdq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -     1.00    -      -     vcvtsi2sd	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -     1.00    -      -     vcvtsi2sd	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00    -      -     0.50   0.50   vcvtsi2sdl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00    -      -     0.50   0.50   vcvtsi2sdq	(%rax), %xmm0, %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -     2.00    -      -     vcvtsi2ssl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -     2.00    -      -     vcvtsi2ssq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -     2.00    -      -     vcvtsi2ss	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -     2.00    -      -     vcvtsi2ss	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00    -     1.00   0.50   0.50   vcvtsi2ssl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00    -     1.00   0.50   0.50   vcvtsi2ssq	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  -      -     1.00    -      -      -      -      -     vcvtss2sd	%xmm0, %xmm1, %xmm2

Modified: llvm/trunk/test/tools/llvm-mca/X86/Generic/resources-sse1.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/Generic/resources-sse1.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/Generic/resources-sse1.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/Generic/resources-sse1.s Mon May  6 14:39:51 2019
@@ -212,8 +212,8 @@ xorps       (%rax), %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtpi2ps	(%rax), %xmm2
 # CHECK-NEXT:  1      3     1.00                        cvtps2pi	%xmm0, %mm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtps2pi	(%rax), %mm2
-# CHECK-NEXT:  3      5     2.00                        cvtsi2ssl	%ecx, %xmm2
-# CHECK-NEXT:  3      5     2.00                        cvtsi2ssq	%rcx, %xmm2
+# CHECK-NEXT:  3      5     2.00                        cvtsi2ss	%ecx, %xmm2
+# CHECK-NEXT:  3      5     2.00                        cvtsi2ss	%rcx, %xmm2
 # CHECK-NEXT:  3      10    1.00    *                   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  3      10    1.00    *                   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  2      5     1.00                        cvtss2si	%xmm0, %ecx
@@ -353,8 +353,8 @@ xorps       (%rax), %xmm2
 # CHECK-NEXT:  -      -      -     1.00    -      -     0.50   0.50   cvtpi2ps	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -     1.00    -      -      -      -     cvtps2pi	%xmm0, %mm2
 # CHECK-NEXT:  -      -      -     1.00    -      -     0.50   0.50   cvtps2pi	(%rax), %mm2
-# CHECK-NEXT:  -      -      -     1.00    -     2.00    -      -     cvtsi2ssl	%ecx, %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -     2.00    -      -     cvtsi2ssq	%rcx, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -     2.00    -      -     cvtsi2ss	%ecx, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -     2.00    -      -     cvtsi2ss	%rcx, %xmm2
 # CHECK-NEXT:  -      -      -     1.00    -     1.00   0.50   0.50   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -     1.00    -     1.00   0.50   0.50   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  -      -     1.00   1.00    -      -      -      -     cvtss2si	%xmm0, %ecx

Modified: llvm/trunk/test/tools/llvm-mca/X86/Generic/resources-sse2.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/Generic/resources-sse2.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/Generic/resources-sse2.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/Generic/resources-sse2.s Mon May  6 14:39:51 2019
@@ -444,8 +444,8 @@ xorpd       (%rax), %xmm2
 # CHECK-NEXT:  3      9     1.00    *                   cvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  2      4     1.00                        cvtsd2ss	%xmm0, %xmm2
 # CHECK-NEXT:  3      10    1.00    *                   cvtsd2ss	(%rax), %xmm2
-# CHECK-NEXT:  2      4     1.00                        cvtsi2sdl	%ecx, %xmm2
-# CHECK-NEXT:  2      4     1.00                        cvtsi2sdq	%rcx, %xmm2
+# CHECK-NEXT:  2      4     1.00                        cvtsi2sd	%ecx, %xmm2
+# CHECK-NEXT:  2      4     1.00                        cvtsi2sd	%rcx, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  1      1     1.00                        cvtss2sd	%xmm0, %xmm2
@@ -728,8 +728,8 @@ xorpd       (%rax), %xmm2
 # CHECK-NEXT:  -      -     1.00   1.00    -      -     0.50   0.50   cvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  -      -      -     1.00    -     1.00    -      -     cvtsd2ss	%xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00    -     1.00   0.50   0.50   cvtsd2ss	(%rax), %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -     1.00    -      -     cvtsi2sdl	%ecx, %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -     1.00    -      -     cvtsi2sdq	%rcx, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -     1.00    -      -     cvtsi2sd	%ecx, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -     1.00    -      -     cvtsi2sd	%rcx, %xmm2
 # CHECK-NEXT:  -      -      -     1.00    -      -     0.50   0.50   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -     1.00    -      -     0.50   0.50   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  -      -     1.00    -      -      -      -      -     cvtss2sd	%xmm0, %xmm2

Modified: llvm/trunk/test/tools/llvm-mca/X86/Haswell/resources-avx1.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/Haswell/resources-avx1.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/Haswell/resources-avx1.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/Haswell/resources-avx1.s Mon May  6 14:39:51 2019
@@ -1144,12 +1144,12 @@ vzeroupper
 # CHECK-NEXT:  3      9     1.00    *                   vcvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  2      4     1.00                        vcvtsd2ss	%xmm0, %xmm1, %xmm2
 # CHECK-NEXT:  3      9     1.00    *                   vcvtsd2ss	(%rax), %xmm1, %xmm2
-# CHECK-NEXT:  2      4     1.00                        vcvtsi2sdl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  2      4     1.00                        vcvtsi2sdq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  2      4     1.00                        vcvtsi2sd	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  2      4     1.00                        vcvtsi2sd	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   vcvtsi2sdl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   vcvtsi2sdq	(%rax), %xmm0, %xmm2
-# CHECK-NEXT:  2      4     1.00                        vcvtsi2ssl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  3      5     2.00                        vcvtsi2ssq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  2      4     1.00                        vcvtsi2ss	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  3      5     2.00                        vcvtsi2ss	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   vcvtsi2ssl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   vcvtsi2ssq	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  2      2     1.00                        vcvtss2sd	%xmm0, %xmm1, %xmm2
@@ -1854,12 +1854,12 @@ vzeroupper
 # CHECK-NEXT:  -      -     1.00   1.00   0.50   0.50    -      -      -      -     vcvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     vcvtsd2ss	%xmm0, %xmm1, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -     1.00    -      -     vcvtsd2ss	(%rax), %xmm1, %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     vcvtsi2sdl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     vcvtsi2sdq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     vcvtsi2sd	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     vcvtsi2sd	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     vcvtsi2sdl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     vcvtsi2sdq	(%rax), %xmm0, %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     vcvtsi2ssl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -      -      -     2.00    -      -     vcvtsi2ssq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     vcvtsi2ss	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -      -      -     2.00    -      -     vcvtsi2ss	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     vcvtsi2ssl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     vcvtsi2ssq	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  -      -     1.00    -      -      -      -     1.00    -      -     vcvtss2sd	%xmm0, %xmm1, %xmm2

Modified: llvm/trunk/test/tools/llvm-mca/X86/Haswell/resources-sse1.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/Haswell/resources-sse1.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/Haswell/resources-sse1.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/Haswell/resources-sse1.s Mon May  6 14:39:51 2019
@@ -212,8 +212,8 @@ xorps       (%rax), %xmm2
 # CHECK-NEXT:  2      8     1.00    *                   cvtpi2ps	(%rax), %xmm2
 # CHECK-NEXT:  2      4     1.00                        cvtps2pi	%xmm0, %mm2
 # CHECK-NEXT:  2      8     1.00    *                   cvtps2pi	(%rax), %mm2
-# CHECK-NEXT:  2      4     1.00                        cvtsi2ssl	%ecx, %xmm2
-# CHECK-NEXT:  3      5     2.00                        cvtsi2ssq	%rcx, %xmm2
+# CHECK-NEXT:  2      4     1.00                        cvtsi2ss	%ecx, %xmm2
+# CHECK-NEXT:  3      5     2.00                        cvtsi2ss	%rcx, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  2      4     1.00                        cvtss2si	%xmm0, %ecx
@@ -355,8 +355,8 @@ xorps       (%rax), %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     cvtpi2ps	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     cvtps2pi	%xmm0, %mm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     cvtps2pi	(%rax), %mm2
-# CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     cvtsi2ssl	%ecx, %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -      -      -     2.00    -      -     cvtsi2ssq	%rcx, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     cvtsi2ss	%ecx, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -      -      -     2.00    -      -     cvtsi2ss	%rcx, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  -      -     1.00   1.00    -      -      -      -      -      -     cvtss2si	%xmm0, %ecx

Modified: llvm/trunk/test/tools/llvm-mca/X86/Haswell/resources-sse2.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/Haswell/resources-sse2.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/Haswell/resources-sse2.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/Haswell/resources-sse2.s Mon May  6 14:39:51 2019
@@ -444,8 +444,8 @@ xorpd       (%rax), %xmm2
 # CHECK-NEXT:  3      9     1.00    *                   cvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  2      4     1.00                        cvtsd2ss	%xmm0, %xmm2
 # CHECK-NEXT:  3      9     1.00    *                   cvtsd2ss	(%rax), %xmm2
-# CHECK-NEXT:  2      4     1.00                        cvtsi2sdl	%ecx, %xmm2
-# CHECK-NEXT:  2      4     1.00                        cvtsi2sdq	%rcx, %xmm2
+# CHECK-NEXT:  2      4     1.00                        cvtsi2sd	%ecx, %xmm2
+# CHECK-NEXT:  2      4     1.00                        cvtsi2sd	%rcx, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  2      2     1.00                        cvtss2sd	%xmm0, %xmm2
@@ -730,8 +730,8 @@ xorpd       (%rax), %xmm2
 # CHECK-NEXT:  -      -     1.00   1.00   0.50   0.50    -      -      -      -     cvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     cvtsd2ss	%xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -     1.00    -      -     cvtsd2ss	(%rax), %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     cvtsi2sdl	%ecx, %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     cvtsi2sdq	%rcx, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     cvtsi2sd	%ecx, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -      -      -     1.00    -      -     cvtsi2sd	%rcx, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  -      -     1.00    -      -      -      -     1.00    -      -     cvtss2sd	%xmm0, %xmm2

Modified: llvm/trunk/test/tools/llvm-mca/X86/SLM/resources-sse1.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/SLM/resources-sse1.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/SLM/resources-sse1.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/SLM/resources-sse1.s Mon May  6 14:39:51 2019
@@ -212,8 +212,8 @@ xorps       (%rax), %xmm2
 # CHECK-NEXT:  1      7     1.00    *                   cvtpi2ps	(%rax), %xmm2
 # CHECK-NEXT:  1      4     0.50                        cvtps2pi	%xmm0, %mm2
 # CHECK-NEXT:  1      7     1.00    *                   cvtps2pi	(%rax), %mm2
-# CHECK-NEXT:  1      4     0.50                        cvtsi2ssl	%ecx, %xmm2
-# CHECK-NEXT:  1      4     0.50                        cvtsi2ssq	%rcx, %xmm2
+# CHECK-NEXT:  1      4     0.50                        cvtsi2ss	%ecx, %xmm2
+# CHECK-NEXT:  1      4     0.50                        cvtsi2ss	%rcx, %xmm2
 # CHECK-NEXT:  1      7     1.00    *                   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  1      7     1.00    *                   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  1      4     0.50                        cvtss2si	%xmm0, %ecx
@@ -353,8 +353,8 @@ xorps       (%rax), %xmm2
 # CHECK-NEXT:  -      -      -     0.50   0.50    -      -     1.00   cvtpi2ps	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -     0.50   0.50    -      -      -     cvtps2pi	%xmm0, %mm2
 # CHECK-NEXT:  -      -      -     0.50   0.50    -      -     1.00   cvtps2pi	(%rax), %mm2
-# CHECK-NEXT:  -      -      -     0.50   0.50    -      -      -     cvtsi2ssl	%ecx, %xmm2
-# CHECK-NEXT:  -      -      -     0.50   0.50    -      -      -     cvtsi2ssq	%rcx, %xmm2
+# CHECK-NEXT:  -      -      -     0.50   0.50    -      -      -     cvtsi2ss	%ecx, %xmm2
+# CHECK-NEXT:  -      -      -     0.50   0.50    -      -      -     cvtsi2ss	%rcx, %xmm2
 # CHECK-NEXT:  -      -      -     0.50   0.50    -      -     1.00   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -     0.50   0.50    -      -     1.00   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -     0.50   0.50    -      -      -     cvtss2si	%xmm0, %ecx

Modified: llvm/trunk/test/tools/llvm-mca/X86/SLM/resources-sse2.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/SLM/resources-sse2.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/SLM/resources-sse2.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/SLM/resources-sse2.s Mon May  6 14:39:51 2019
@@ -444,8 +444,8 @@ xorpd       (%rax), %xmm2
 # CHECK-NEXT:  1      7     1.00    *                   cvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  1      4     0.50                        cvtsd2ss	%xmm0, %xmm2
 # CHECK-NEXT:  1      7     1.00    *                   cvtsd2ss	(%rax), %xmm2
-# CHECK-NEXT:  1      4     0.50                        cvtsi2sdl	%ecx, %xmm2
-# CHECK-NEXT:  1      4     0.50                        cvtsi2sdq	%rcx, %xmm2
+# CHECK-NEXT:  1      4     0.50                        cvtsi2sd	%ecx, %xmm2
+# CHECK-NEXT:  1      4     0.50                        cvtsi2sd	%rcx, %xmm2
 # CHECK-NEXT:  1      7     1.00    *                   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  1      7     1.00    *                   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  1      4     0.50                        cvtss2sd	%xmm0, %xmm2
@@ -728,8 +728,8 @@ xorpd       (%rax), %xmm2
 # CHECK-NEXT:  -      -      -     0.50   0.50    -      -     1.00   cvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  -      -      -     0.50   0.50    -      -      -     cvtsd2ss	%xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     0.50   0.50    -      -     1.00   cvtsd2ss	(%rax), %xmm2
-# CHECK-NEXT:  -      -      -     0.50   0.50    -      -      -     cvtsi2sdl	%ecx, %xmm2
-# CHECK-NEXT:  -      -      -     0.50   0.50    -      -      -     cvtsi2sdq	%rcx, %xmm2
+# CHECK-NEXT:  -      -      -     0.50   0.50    -      -      -     cvtsi2sd	%ecx, %xmm2
+# CHECK-NEXT:  -      -      -     0.50   0.50    -      -      -     cvtsi2sd	%rcx, %xmm2
 # CHECK-NEXT:  -      -      -     0.50   0.50    -      -     1.00   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -     0.50   0.50    -      -     1.00   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -     0.50   0.50    -      -      -     cvtss2sd	%xmm0, %xmm2

Modified: llvm/trunk/test/tools/llvm-mca/X86/SandyBridge/resources-avx1.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/SandyBridge/resources-avx1.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/SandyBridge/resources-avx1.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/SandyBridge/resources-avx1.s Mon May  6 14:39:51 2019
@@ -1144,12 +1144,12 @@ vzeroupper
 # CHECK-NEXT:  3      10    1.00    *                   vcvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  2      4     1.00                        vcvtsd2ss	%xmm0, %xmm1, %xmm2
 # CHECK-NEXT:  3      10    1.00    *                   vcvtsd2ss	(%rax), %xmm1, %xmm2
-# CHECK-NEXT:  2      4     1.00                        vcvtsi2sdl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  2      4     1.00                        vcvtsi2sdq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  2      4     1.00                        vcvtsi2sd	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  2      4     1.00                        vcvtsi2sd	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   vcvtsi2sdl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   vcvtsi2sdq	(%rax), %xmm0, %xmm2
-# CHECK-NEXT:  3      5     2.00                        vcvtsi2ssl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  3      5     2.00                        vcvtsi2ssq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  3      5     2.00                        vcvtsi2ss	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  3      5     2.00                        vcvtsi2ss	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  3      10    1.00    *                   vcvtsi2ssl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  3      10    1.00    *                   vcvtsi2ssq	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  1      1     1.00                        vcvtss2sd	%xmm0, %xmm1, %xmm2
@@ -1852,12 +1852,12 @@ vzeroupper
 # CHECK-NEXT:  -      -     1.00   1.00    -      -     0.50   0.50   vcvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  -      -      -     1.00    -     1.00    -      -     vcvtsd2ss	%xmm0, %xmm1, %xmm2
 # CHECK-NEXT:  -      -      -     1.00    -     1.00   0.50   0.50   vcvtsd2ss	(%rax), %xmm1, %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -     1.00    -      -     vcvtsi2sdl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -     1.00    -      -     vcvtsi2sdq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -     1.00    -      -     vcvtsi2sd	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -     1.00    -      -     vcvtsi2sd	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00    -      -     0.50   0.50   vcvtsi2sdl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00    -      -     0.50   0.50   vcvtsi2sdq	(%rax), %xmm0, %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -     2.00    -      -     vcvtsi2ssl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -     2.00    -      -     vcvtsi2ssq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -     2.00    -      -     vcvtsi2ss	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -     2.00    -      -     vcvtsi2ss	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00    -     1.00   0.50   0.50   vcvtsi2ssl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00    -     1.00   0.50   0.50   vcvtsi2ssq	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  -      -     1.00    -      -      -      -      -     vcvtss2sd	%xmm0, %xmm1, %xmm2

Modified: llvm/trunk/test/tools/llvm-mca/X86/SandyBridge/resources-sse1.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/SandyBridge/resources-sse1.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/SandyBridge/resources-sse1.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/SandyBridge/resources-sse1.s Mon May  6 14:39:51 2019
@@ -212,8 +212,8 @@ xorps       (%rax), %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtpi2ps	(%rax), %xmm2
 # CHECK-NEXT:  1      3     1.00                        cvtps2pi	%xmm0, %mm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtps2pi	(%rax), %mm2
-# CHECK-NEXT:  3      5     2.00                        cvtsi2ssl	%ecx, %xmm2
-# CHECK-NEXT:  3      5     2.00                        cvtsi2ssq	%rcx, %xmm2
+# CHECK-NEXT:  3      5     2.00                        cvtsi2ss	%ecx, %xmm2
+# CHECK-NEXT:  3      5     2.00                        cvtsi2ss	%rcx, %xmm2
 # CHECK-NEXT:  3      10    1.00    *                   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  3      10    1.00    *                   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  2      5     1.00                        cvtss2si	%xmm0, %ecx
@@ -353,8 +353,8 @@ xorps       (%rax), %xmm2
 # CHECK-NEXT:  -      -      -     1.00    -      -     0.50   0.50   cvtpi2ps	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -     1.00    -      -      -      -     cvtps2pi	%xmm0, %mm2
 # CHECK-NEXT:  -      -      -     1.00    -      -     0.50   0.50   cvtps2pi	(%rax), %mm2
-# CHECK-NEXT:  -      -      -     1.00    -     2.00    -      -     cvtsi2ssl	%ecx, %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -     2.00    -      -     cvtsi2ssq	%rcx, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -     2.00    -      -     cvtsi2ss	%ecx, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -     2.00    -      -     cvtsi2ss	%rcx, %xmm2
 # CHECK-NEXT:  -      -      -     1.00    -     1.00   0.50   0.50   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -     1.00    -     1.00   0.50   0.50   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  -      -     1.00   1.00    -      -      -      -     cvtss2si	%xmm0, %ecx

Modified: llvm/trunk/test/tools/llvm-mca/X86/SandyBridge/resources-sse2.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/SandyBridge/resources-sse2.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/SandyBridge/resources-sse2.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/SandyBridge/resources-sse2.s Mon May  6 14:39:51 2019
@@ -444,8 +444,8 @@ xorpd       (%rax), %xmm2
 # CHECK-NEXT:  3      9     1.00    *                   cvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  2      4     1.00                        cvtsd2ss	%xmm0, %xmm2
 # CHECK-NEXT:  3      10    1.00    *                   cvtsd2ss	(%rax), %xmm2
-# CHECK-NEXT:  2      4     1.00                        cvtsi2sdl	%ecx, %xmm2
-# CHECK-NEXT:  2      4     1.00                        cvtsi2sdq	%rcx, %xmm2
+# CHECK-NEXT:  2      4     1.00                        cvtsi2sd	%ecx, %xmm2
+# CHECK-NEXT:  2      4     1.00                        cvtsi2sd	%rcx, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  1      1     1.00                        cvtss2sd	%xmm0, %xmm2
@@ -728,8 +728,8 @@ xorpd       (%rax), %xmm2
 # CHECK-NEXT:  -      -     1.00   1.00    -      -     0.50   0.50   cvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  -      -      -     1.00    -     1.00    -      -     cvtsd2ss	%xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00    -     1.00   0.50   0.50   cvtsd2ss	(%rax), %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -     1.00    -      -     cvtsi2sdl	%ecx, %xmm2
-# CHECK-NEXT:  -      -      -     1.00    -     1.00    -      -     cvtsi2sdq	%rcx, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -     1.00    -      -     cvtsi2sd	%ecx, %xmm2
+# CHECK-NEXT:  -      -      -     1.00    -     1.00    -      -     cvtsi2sd	%rcx, %xmm2
 # CHECK-NEXT:  -      -      -     1.00    -      -     0.50   0.50   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -     1.00    -      -     0.50   0.50   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  -      -     1.00    -      -      -      -      -     cvtss2sd	%xmm0, %xmm2

Modified: llvm/trunk/test/tools/llvm-mca/X86/SkylakeClient/resources-avx1.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/SkylakeClient/resources-avx1.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/SkylakeClient/resources-avx1.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/SkylakeClient/resources-avx1.s Mon May  6 14:39:51 2019
@@ -1144,12 +1144,12 @@ vzeroupper
 # CHECK-NEXT:  3      11    1.00    *                   vcvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  2      5     1.00                        vcvtsd2ss	%xmm0, %xmm1, %xmm2
 # CHECK-NEXT:  3      10    1.00    *                   vcvtsd2ss	(%rax), %xmm1, %xmm2
-# CHECK-NEXT:  2      5     1.00                        vcvtsi2sdl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  2      5     1.00                        vcvtsi2sdq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  2      5     1.00                        vcvtsi2sd	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  2      5     1.00                        vcvtsi2sd	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   vcvtsi2sdl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   vcvtsi2sdq	(%rax), %xmm0, %xmm2
-# CHECK-NEXT:  2      5     1.00                        vcvtsi2ssl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  3      6     2.00                        vcvtsi2ssq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  2      5     1.00                        vcvtsi2ss	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  3      6     2.00                        vcvtsi2ss	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   vcvtsi2ssl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   vcvtsi2ssq	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  2      5     1.00                        vcvtss2sd	%xmm0, %xmm1, %xmm2
@@ -1854,12 +1854,12 @@ vzeroupper
 # CHECK-NEXT:  -      -     1.50   0.50   0.50   0.50    -      -      -      -     vcvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     vcvtsd2ss	%xmm0, %xmm1, %xmm2
 # CHECK-NEXT:  -      -     0.50   0.50   0.50   0.50    -     1.00    -      -     vcvtsd2ss	(%rax), %xmm1, %xmm2
-# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     vcvtsi2sdl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     vcvtsi2sdq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     vcvtsi2sd	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     vcvtsi2sd	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     vcvtsi2sdl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     vcvtsi2sdq	(%rax), %xmm0, %xmm2
-# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     vcvtsi2ssl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  -      -     0.50   0.50    -      -      -     2.00    -      -     vcvtsi2ssq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     vcvtsi2ss	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -     0.50   0.50    -      -      -     2.00    -      -     vcvtsi2ss	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     vcvtsi2ssl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     vcvtsi2ssq	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     vcvtss2sd	%xmm0, %xmm1, %xmm2

Modified: llvm/trunk/test/tools/llvm-mca/X86/SkylakeClient/resources-sse1.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/SkylakeClient/resources-sse1.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/SkylakeClient/resources-sse1.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/SkylakeClient/resources-sse1.s Mon May  6 14:39:51 2019
@@ -212,8 +212,8 @@ xorps       (%rax), %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtpi2ps	(%rax), %xmm2
 # CHECK-NEXT:  2      5     1.00                        cvtps2pi	%xmm0, %mm2
 # CHECK-NEXT:  2      9     0.50    *                   cvtps2pi	(%rax), %mm2
-# CHECK-NEXT:  2      5     1.00                        cvtsi2ssl	%ecx, %xmm2
-# CHECK-NEXT:  3      6     2.00                        cvtsi2ssq	%rcx, %xmm2
+# CHECK-NEXT:  2      5     1.00                        cvtsi2ss	%ecx, %xmm2
+# CHECK-NEXT:  3      6     2.00                        cvtsi2ss	%rcx, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  2      6     1.00                        cvtss2si	%xmm0, %ecx
@@ -355,8 +355,8 @@ xorps       (%rax), %xmm2
 # CHECK-NEXT:  -      -     1.00    -     0.50   0.50    -      -      -      -     cvtpi2ps	(%rax), %xmm2
 # CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     cvtps2pi	%xmm0, %mm2
 # CHECK-NEXT:  -      -     0.50   0.50   0.50   0.50    -      -      -      -     cvtps2pi	(%rax), %mm2
-# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     cvtsi2ssl	%ecx, %xmm2
-# CHECK-NEXT:  -      -     0.50   0.50    -      -      -     2.00    -      -     cvtsi2ssq	%rcx, %xmm2
+# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     cvtsi2ss	%ecx, %xmm2
+# CHECK-NEXT:  -      -     0.50   0.50    -      -      -     2.00    -      -     cvtsi2ss	%rcx, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  -      -     1.50   0.50    -      -      -      -      -      -     cvtss2si	%xmm0, %ecx

Modified: llvm/trunk/test/tools/llvm-mca/X86/SkylakeClient/resources-sse2.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/SkylakeClient/resources-sse2.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/SkylakeClient/resources-sse2.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/SkylakeClient/resources-sse2.s Mon May  6 14:39:51 2019
@@ -444,8 +444,8 @@ xorpd       (%rax), %xmm2
 # CHECK-NEXT:  3      11    1.00    *                   cvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  2      5     1.00                        cvtsd2ss	%xmm0, %xmm2
 # CHECK-NEXT:  3      10    1.00    *                   cvtsd2ss	(%rax), %xmm2
-# CHECK-NEXT:  2      5     1.00                        cvtsi2sdl	%ecx, %xmm2
-# CHECK-NEXT:  2      5     1.00                        cvtsi2sdq	%rcx, %xmm2
+# CHECK-NEXT:  2      5     1.00                        cvtsi2sd	%ecx, %xmm2
+# CHECK-NEXT:  2      5     1.00                        cvtsi2sd	%rcx, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  2      5     1.00                        cvtss2sd	%xmm0, %xmm2
@@ -730,8 +730,8 @@ xorpd       (%rax), %xmm2
 # CHECK-NEXT:  -      -     1.50   0.50   0.50   0.50    -      -      -      -     cvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     cvtsd2ss	%xmm0, %xmm2
 # CHECK-NEXT:  -      -     0.50   0.50   0.50   0.50    -     1.00    -      -     cvtsd2ss	(%rax), %xmm2
-# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     cvtsi2sdl	%ecx, %xmm2
-# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     cvtsi2sdq	%rcx, %xmm2
+# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     cvtsi2sd	%ecx, %xmm2
+# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     cvtsi2sd	%rcx, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     cvtss2sd	%xmm0, %xmm2

Modified: llvm/trunk/test/tools/llvm-mca/X86/SkylakeServer/resources-avx1.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/SkylakeServer/resources-avx1.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/SkylakeServer/resources-avx1.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/SkylakeServer/resources-avx1.s Mon May  6 14:39:51 2019
@@ -1144,12 +1144,12 @@ vzeroupper
 # CHECK-NEXT:  3      11    1.00    *                   vcvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  2      5     1.00                        vcvtsd2ss	%xmm0, %xmm1, %xmm2
 # CHECK-NEXT:  3      10    1.00    *                   vcvtsd2ss	(%rax), %xmm1, %xmm2
-# CHECK-NEXT:  2      5     1.00                        vcvtsi2sdl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  2      5     1.00                        vcvtsi2sdq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  2      5     1.00                        vcvtsi2sd	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  2      5     1.00                        vcvtsi2sd	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   vcvtsi2sdl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   vcvtsi2sdq	(%rax), %xmm0, %xmm2
-# CHECK-NEXT:  2      5     1.00                        vcvtsi2ssl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  3      6     2.00                        vcvtsi2ssq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  2      5     1.00                        vcvtsi2ss	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  3      6     2.00                        vcvtsi2ss	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   vcvtsi2ssl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   vcvtsi2ssq	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  2      5     1.00                        vcvtss2sd	%xmm0, %xmm1, %xmm2
@@ -1854,12 +1854,12 @@ vzeroupper
 # CHECK-NEXT:  -      -     1.00   1.00   0.50   0.50    -      -      -      -     vcvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     vcvtsd2ss	%xmm0, %xmm1, %xmm2
 # CHECK-NEXT:  -      -     0.33   0.33   0.50   0.50    -     1.33    -      -     vcvtsd2ss	(%rax), %xmm1, %xmm2
-# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     vcvtsi2sdl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     vcvtsi2sdq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     vcvtsi2sd	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     vcvtsi2sd	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     vcvtsi2sdl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     vcvtsi2sdq	(%rax), %xmm0, %xmm2
-# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     vcvtsi2ssl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     2.33    -      -     vcvtsi2ssq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     vcvtsi2ss	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     2.33    -      -     vcvtsi2ss	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     vcvtsi2ssl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     vcvtsi2ssq	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     vcvtss2sd	%xmm0, %xmm1, %xmm2

Modified: llvm/trunk/test/tools/llvm-mca/X86/SkylakeServer/resources-sse1.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/SkylakeServer/resources-sse1.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/SkylakeServer/resources-sse1.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/SkylakeServer/resources-sse1.s Mon May  6 14:39:51 2019
@@ -212,8 +212,8 @@ xorps       (%rax), %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtpi2ps	(%rax), %xmm2
 # CHECK-NEXT:  2      5     1.00                        cvtps2pi	%xmm0, %mm2
 # CHECK-NEXT:  2      9     0.50    *                   cvtps2pi	(%rax), %mm2
-# CHECK-NEXT:  2      5     1.00                        cvtsi2ssl	%ecx, %xmm2
-# CHECK-NEXT:  3      6     2.00                        cvtsi2ssq	%rcx, %xmm2
+# CHECK-NEXT:  2      5     1.00                        cvtsi2ss	%ecx, %xmm2
+# CHECK-NEXT:  3      6     2.00                        cvtsi2ss	%rcx, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  2      6     1.00                        cvtss2si	%xmm0, %ecx
@@ -355,8 +355,8 @@ xorps       (%rax), %xmm2
 # CHECK-NEXT:  -      -     1.00    -     0.50   0.50    -      -      -      -     cvtpi2ps	(%rax), %xmm2
 # CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     cvtps2pi	%xmm0, %mm2
 # CHECK-NEXT:  -      -     0.33   0.33   0.50   0.50    -     0.33    -      -     cvtps2pi	(%rax), %mm2
-# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     cvtsi2ssl	%ecx, %xmm2
-# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     2.33    -      -     cvtsi2ssq	%rcx, %xmm2
+# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     cvtsi2ss	%ecx, %xmm2
+# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     2.33    -      -     cvtsi2ss	%rcx, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  -      -     1.00   1.00    -      -      -      -      -      -     cvtss2si	%xmm0, %ecx

Modified: llvm/trunk/test/tools/llvm-mca/X86/SkylakeServer/resources-sse2.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/SkylakeServer/resources-sse2.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/SkylakeServer/resources-sse2.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/SkylakeServer/resources-sse2.s Mon May  6 14:39:51 2019
@@ -444,8 +444,8 @@ xorpd       (%rax), %xmm2
 # CHECK-NEXT:  3      11    1.00    *                   cvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  2      5     1.00                        cvtsd2ss	%xmm0, %xmm2
 # CHECK-NEXT:  3      10    1.00    *                   cvtsd2ss	(%rax), %xmm2
-# CHECK-NEXT:  2      5     1.00                        cvtsi2sdl	%ecx, %xmm2
-# CHECK-NEXT:  2      5     1.00                        cvtsi2sdq	%rcx, %xmm2
+# CHECK-NEXT:  2      5     1.00                        cvtsi2sd	%ecx, %xmm2
+# CHECK-NEXT:  2      5     1.00                        cvtsi2sd	%rcx, %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  2      9     1.00    *                   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  2      5     1.00                        cvtss2sd	%xmm0, %xmm2
@@ -730,8 +730,8 @@ xorpd       (%rax), %xmm2
 # CHECK-NEXT:  -      -     1.00   1.00   0.50   0.50    -      -      -      -     cvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     cvtsd2ss	%xmm0, %xmm2
 # CHECK-NEXT:  -      -     0.33   0.33   0.50   0.50    -     1.33    -      -     cvtsd2ss	(%rax), %xmm2
-# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     cvtsi2sdl	%ecx, %xmm2
-# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     cvtsi2sdq	%rcx, %xmm2
+# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     cvtsi2sd	%ecx, %xmm2
+# CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     cvtsi2sd	%rcx, %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -     1.00   0.50   0.50    -      -      -      -     cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  -      -     0.33   0.33    -      -      -     1.33    -      -     cvtss2sd	%xmm0, %xmm2

Modified: llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-avx1.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-avx1.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-avx1.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-avx1.s Mon May  6 14:39:51 2019
@@ -1144,12 +1144,12 @@ vzeroupper
 # CHECK-NEXT:  1      12    1.00    *                   vcvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  1      4     1.00                        vcvtsd2ss	%xmm0, %xmm1, %xmm2
 # CHECK-NEXT:  2      11    1.00    *                   vcvtsd2ss	(%rax), %xmm1, %xmm2
-# CHECK-NEXT:  1      5     1.00                        vcvtsi2sdl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  1      5     1.00                        vcvtsi2sdq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  1      5     1.00                        vcvtsi2sd	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  1      5     1.00                        vcvtsi2sd	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  1      12    1.00    *                   vcvtsi2sdl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  1      12    1.00    *                   vcvtsi2sdq	(%rax), %xmm0, %xmm2
-# CHECK-NEXT:  1      5     1.00                        vcvtsi2ssl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  1      5     1.00                        vcvtsi2ssq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  1      5     1.00                        vcvtsi2ss	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  1      5     1.00                        vcvtsi2ss	%rcx, %xmm0, %xmm2
 # CHECK-NEXT:  1      12    1.00    *                   vcvtsi2ssl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  1      12    1.00    *                   vcvtsi2ssq	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  1      4     1.00                        vcvtss2sd	%xmm0, %xmm1, %xmm2
@@ -1856,12 +1856,12 @@ vzeroupper
 # CHECK-NEXT: 0.50   0.50    -      -      -      -      -      -      -     1.00   1.00    -     vcvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -     1.00    -     vcvtsd2ss	%xmm0, %xmm1, %xmm2
 # CHECK-NEXT: 0.50   0.50    -      -      -      -      -     1.00    -      -     1.00    -     vcvtsd2ss	(%rax), %xmm1, %xmm2
-# CHECK-NEXT:  -      -      -      -      -      -      -     0.33   0.33    -     1.33    -     vcvtsi2sdl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  -      -      -      -      -      -      -     0.33   0.33    -     1.33    -     vcvtsi2sdq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -      -      -      -      -     0.33   0.33    -     1.33    -     vcvtsi2sd	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -      -      -      -      -     0.33   0.33    -     1.33    -     vcvtsi2sd	%rcx, %xmm0, %xmm2
 # CHECK-NEXT: 0.50   0.50    -      -      -      -      -      -      -      -     1.00    -     vcvtsi2sdl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT: 0.50   0.50    -      -      -      -      -      -      -      -     1.00    -     vcvtsi2sdq	(%rax), %xmm0, %xmm2
-# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -     1.00    -     vcvtsi2ssl	%ecx, %xmm0, %xmm2
-# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -     1.00    -     vcvtsi2ssq	%rcx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -     1.00    -     vcvtsi2ss	%ecx, %xmm0, %xmm2
+# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -     1.00    -     vcvtsi2ss	%rcx, %xmm0, %xmm2
 # CHECK-NEXT: 0.50   0.50    -      -      -      -      -      -      -      -     1.00    -     vcvtsi2ssl	(%rax), %xmm0, %xmm2
 # CHECK-NEXT: 0.50   0.50    -      -      -      -      -      -      -      -     1.00    -     vcvtsi2ssq	(%rax), %xmm0, %xmm2
 # CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -     1.00    -     vcvtss2sd	%xmm0, %xmm1, %xmm2

Modified: llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-sse1.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-sse1.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-sse1.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-sse1.s Mon May  6 14:39:51 2019
@@ -212,8 +212,8 @@ xorps       (%rax), %xmm2
 # CHECK-NEXT:  1      12    1.00    *                   cvtpi2ps	(%rax), %xmm2
 # CHECK-NEXT:  1      4     1.00                        cvtps2pi	%xmm0, %mm2
 # CHECK-NEXT:  1      12    1.00    *                   cvtps2pi	(%rax), %mm2
-# CHECK-NEXT:  1      5     1.00                        cvtsi2ssl	%ecx, %xmm2
-# CHECK-NEXT:  1      5     1.00                        cvtsi2ssq	%rcx, %xmm2
+# CHECK-NEXT:  1      5     1.00                        cvtsi2ss	%ecx, %xmm2
+# CHECK-NEXT:  1      5     1.00                        cvtsi2ss	%rcx, %xmm2
 # CHECK-NEXT:  1      12    1.00    *                   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  1      12    1.00    *                   cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  1      5     1.00                        cvtss2si	%xmm0, %ecx
@@ -357,8 +357,8 @@ xorps       (%rax), %xmm2
 # CHECK-NEXT: 0.50   0.50    -      -      -      -      -      -      -      -     1.00    -     cvtpi2ps	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -     1.00    -     cvtps2pi	%xmm0, %mm2
 # CHECK-NEXT: 0.50   0.50    -      -      -      -      -      -      -      -     1.00    -     cvtps2pi	(%rax), %mm2
-# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -     1.00    -     cvtsi2ssl	%ecx, %xmm2
-# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -     1.00    -     cvtsi2ssq	%rcx, %xmm2
+# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -     1.00    -     cvtsi2ss	%ecx, %xmm2
+# CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -     1.00    -     cvtsi2ss	%rcx, %xmm2
 # CHECK-NEXT: 0.50   0.50    -      -      -      -      -      -      -      -     1.00    -     cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT: 0.50   0.50    -      -      -      -      -      -      -      -     1.00    -     cvtsi2ssl	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -      -      -      -      -      -     0.50   0.50   1.00    -     cvtss2si	%xmm0, %ecx

Modified: llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-sse2.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-sse2.s?rev=360085&r1=360084&r2=360085&view=diff
==============================================================================
--- llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-sse2.s (original)
+++ llvm/trunk/test/tools/llvm-mca/X86/Znver1/resources-sse2.s Mon May  6 14:39:51 2019
@@ -444,8 +444,8 @@ xorpd       (%rax), %xmm2
 # CHECK-NEXT:  1      12    1.00    *                   cvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  1      4     1.00                        cvtsd2ss	%xmm0, %xmm2
 # CHECK-NEXT:  2      11    1.00    *                   cvtsd2ss	(%rax), %xmm2
-# CHECK-NEXT:  1      5     1.00                        cvtsi2sdl	%ecx, %xmm2
-# CHECK-NEXT:  1      5     1.00                        cvtsi2sdq	%rcx, %xmm2
+# CHECK-NEXT:  1      5     1.00                        cvtsi2sd	%ecx, %xmm2
+# CHECK-NEXT:  1      5     1.00                        cvtsi2sd	%rcx, %xmm2
 # CHECK-NEXT:  1      12    1.00    *                   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  1      12    1.00    *                   cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  1      4     1.00                        cvtss2sd	%xmm0, %xmm2
@@ -732,8 +732,8 @@ xorpd       (%rax), %xmm2
 # CHECK-NEXT: 0.50   0.50    -      -      -      -      -      -      -     1.00   1.00    -     cvtsd2si	(%rax), %rcx
 # CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -     1.00    -     cvtsd2ss	%xmm0, %xmm2
 # CHECK-NEXT: 0.50   0.50    -      -      -      -      -     1.00    -      -     1.00    -     cvtsd2ss	(%rax), %xmm2
-# CHECK-NEXT:  -      -      -      -      -      -      -     0.33   0.33    -     1.33    -     cvtsi2sdl	%ecx, %xmm2
-# CHECK-NEXT:  -      -      -      -      -      -      -     0.33   0.33    -     1.33    -     cvtsi2sdq	%rcx, %xmm2
+# CHECK-NEXT:  -      -      -      -      -      -      -     0.33   0.33    -     1.33    -     cvtsi2sd	%ecx, %xmm2
+# CHECK-NEXT:  -      -      -      -      -      -      -     0.33   0.33    -     1.33    -     cvtsi2sd	%rcx, %xmm2
 # CHECK-NEXT: 0.50   0.50    -      -      -      -      -      -      -      -     1.00    -     cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT: 0.50   0.50    -      -      -      -      -      -      -      -     1.00    -     cvtsi2sdl	(%rax), %xmm2
 # CHECK-NEXT:  -      -      -      -      -      -      -      -      -      -     1.00    -     cvtss2sd	%xmm0, %xmm2




More information about the llvm-commits mailing list