[llvm] r296293 - [X86] Fix execution domain for cmpss/sd instructions.

Craig Topper via llvm-commits <llvm-commits at lists.llvm.org>
Sat Feb 25 22:45:59 PST 2017


Author: ctopper
Date: Sun Feb 26 00:45:59 2017
New Revision: 296293

URL: http://llvm.org/viewvc/llvm-project?rev=296293&view=rev
Log:
[X86] Fix execution domain for cmpss/sd instructions.

Tag the scalar FP compare instructions with an explicit ExeDomain
(SSEPackedSingle for CMPSS/VCMPSS, SSEPackedDouble for CMPSD/VCMPSD) so the
execution domain fixup pass keeps the bitwise logic and register moves around
a double-precision compare in the double domain (andpd/andnpd/orpd/movapd)
instead of defaulting to the single-precision forms.
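
For illustration, a minimal, hypothetical reduction in the spirit of the
fp-select-cmp-and.ll tests updated below (the function name and constants are
assumptions, not taken from the commit): a double-precision compare feeding a
select is lowered to cmpsd plus bitwise mask operations, and with CMPSD now
tagged SSEPackedDouble the domain fixup pass picks the pd forms of those masks.

    ; Hypothetical example (not part of the commit), LLVM IR:
    define double @cmp_select(double %a, double %b, double %eps) {
      %cmp = fcmp olt double %a, %eps
      %sel = select i1 %cmp, double %b, double 0.0
      ; Previously lowered to:  cmpltsd + andps  (domain mismatch)
      ; With this change:       cmpltsd + andpd  (stays in the double domain)
      ret double %sel
    }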

Modified:
    llvm/trunk/lib/Target/X86/X86InstrSSE.td
    llvm/trunk/test/CodeGen/X86/fast-isel-select-sse.ll
    llvm/trunk/test/CodeGen/X86/fp-select-cmp-and.ll
    llvm/trunk/test/CodeGen/X86/logical-load-fold.ll
    llvm/trunk/test/CodeGen/X86/sse-minmax.ll

Modified: llvm/trunk/lib/Target/X86/X86InstrSSE.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrSSE.td?rev=296293&r1=296292&r2=296293&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrSSE.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrSSE.td Sun Feb 26 00:45:59 2017
@@ -2308,10 +2308,12 @@ multiclass sse12_cmp_scalar<RegisterClas
   }
 }
 
+let ExeDomain = SSEPackedSingle in
 defm VCMPSS : sse12_cmp_scalar<FR32, f32mem, AVXCC, X86cmps, f32, loadf32,
                  "cmp${cc}ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                  "cmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
                  SSE_ALU_F32S, i8immZExt5>, XS, VEX_4V, VEX_LIG, VEX_WIG;
+let ExeDomain = SSEPackedDouble in
 defm VCMPSD : sse12_cmp_scalar<FR64, f64mem, AVXCC, X86cmps, f64, loadf64,
                  "cmp${cc}sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                  "cmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
@@ -2319,10 +2321,12 @@ defm VCMPSD : sse12_cmp_scalar<FR64, f64
                  XD, VEX_4V, VEX_LIG, VEX_WIG;
 
 let Constraints = "$src1 = $dst" in {
+  let ExeDomain = SSEPackedSingle in
   defm CMPSS : sse12_cmp_scalar<FR32, f32mem, SSECC, X86cmps, f32, loadf32,
                   "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
                   "cmpss\t{$cc, $src2, $dst|$dst, $src2, $cc}", SSE_ALU_F32S,
                   i8immZExt3>, XS;
+  let ExeDomain = SSEPackedDouble in
   defm CMPSD : sse12_cmp_scalar<FR64, f64mem, SSECC, X86cmps, f64, loadf64,
                   "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
                   "cmpsd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
@@ -2348,18 +2352,22 @@ multiclass sse12_cmp_scalar_int<Operand
 
 let isCodeGenOnly = 1 in {
   // Aliases to match intrinsics which expect XMM operand(s).
+  let ExeDomain = SSEPackedSingle in
   defm Int_VCMPSS  : sse12_cmp_scalar_int<ssmem, AVXCC, int_x86_sse_cmp_ss,
                        "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
                        SSE_ALU_F32S, i8immZExt5, sse_load_f32>,
                        XS, VEX_4V;
+  let ExeDomain = SSEPackedDouble in
   defm Int_VCMPSD  : sse12_cmp_scalar_int<sdmem, AVXCC, int_x86_sse2_cmp_sd,
                        "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
                        SSE_ALU_F32S, i8immZExt5, sse_load_f64>, // same latency as f32
                        XD, VEX_4V;
   let Constraints = "$src1 = $dst" in {
+    let ExeDomain = SSEPackedSingle in
     defm Int_CMPSS  : sse12_cmp_scalar_int<ssmem, SSECC, int_x86_sse_cmp_ss,
                          "cmp${cc}ss\t{$src, $dst|$dst, $src}",
                          SSE_ALU_F32S, i8immZExt3, sse_load_f32>, XS;
+    let ExeDomain = SSEPackedDouble in
     defm Int_CMPSD  : sse12_cmp_scalar_int<sdmem, SSECC, int_x86_sse2_cmp_sd,
                          "cmp${cc}sd\t{$src, $dst|$dst, $src}",
                          SSE_ALU_F64S, i8immZExt3, sse_load_f64>,

Modified: llvm/trunk/test/CodeGen/X86/fast-isel-select-sse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-select-sse.ll?rev=296293&r1=296292&r2=296293&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-select-sse.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-select-sse.ll Sun Feb 26 00:45:59 2017
@@ -39,9 +39,9 @@ define double @select_fcmp_oeq_f64(doubl
 ; SSE-LABEL: select_fcmp_oeq_f64:
 ; SSE:       # BB#0:
 ; SSE-NEXT:    cmpeqsd %xmm1, %xmm0
-; SSE-NEXT:    andps %xmm0, %xmm2
-; SSE-NEXT:    andnps %xmm3, %xmm0
-; SSE-NEXT:    orps %xmm2, %xmm0
+; SSE-NEXT:    andpd %xmm0, %xmm2
+; SSE-NEXT:    andnpd %xmm3, %xmm0
+; SSE-NEXT:    orpd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_oeq_f64:
@@ -94,10 +94,10 @@ define double @select_fcmp_ogt_f64(doubl
 ; SSE-LABEL: select_fcmp_ogt_f64:
 ; SSE:       # BB#0:
 ; SSE-NEXT:    cmpltsd %xmm0, %xmm1
-; SSE-NEXT:    andps %xmm1, %xmm2
-; SSE-NEXT:    andnps %xmm3, %xmm1
-; SSE-NEXT:    orps %xmm2, %xmm1
-; SSE-NEXT:    movaps %xmm1, %xmm0
+; SSE-NEXT:    andpd %xmm1, %xmm2
+; SSE-NEXT:    andnpd %xmm3, %xmm1
+; SSE-NEXT:    orpd %xmm2, %xmm1
+; SSE-NEXT:    movapd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_ogt_f64:
@@ -150,10 +150,10 @@ define double @select_fcmp_oge_f64(doubl
 ; SSE-LABEL: select_fcmp_oge_f64:
 ; SSE:       # BB#0:
 ; SSE-NEXT:    cmplesd %xmm0, %xmm1
-; SSE-NEXT:    andps %xmm1, %xmm2
-; SSE-NEXT:    andnps %xmm3, %xmm1
-; SSE-NEXT:    orps %xmm2, %xmm1
-; SSE-NEXT:    movaps %xmm1, %xmm0
+; SSE-NEXT:    andpd %xmm1, %xmm2
+; SSE-NEXT:    andnpd %xmm3, %xmm1
+; SSE-NEXT:    orpd %xmm2, %xmm1
+; SSE-NEXT:    movapd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_oge_f64:
@@ -205,9 +205,9 @@ define double @select_fcmp_olt_f64(doubl
 ; SSE-LABEL: select_fcmp_olt_f64:
 ; SSE:       # BB#0:
 ; SSE-NEXT:    cmpltsd %xmm1, %xmm0
-; SSE-NEXT:    andps %xmm0, %xmm2
-; SSE-NEXT:    andnps %xmm3, %xmm0
-; SSE-NEXT:    orps %xmm2, %xmm0
+; SSE-NEXT:    andpd %xmm0, %xmm2
+; SSE-NEXT:    andnpd %xmm3, %xmm0
+; SSE-NEXT:    orpd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_olt_f64:
@@ -259,9 +259,9 @@ define double @select_fcmp_ole_f64(doubl
 ; SSE-LABEL: select_fcmp_ole_f64:
 ; SSE:       # BB#0:
 ; SSE-NEXT:    cmplesd %xmm1, %xmm0
-; SSE-NEXT:    andps %xmm0, %xmm2
-; SSE-NEXT:    andnps %xmm3, %xmm0
-; SSE-NEXT:    orps %xmm2, %xmm0
+; SSE-NEXT:    andpd %xmm0, %xmm2
+; SSE-NEXT:    andnpd %xmm3, %xmm0
+; SSE-NEXT:    orpd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_ole_f64:
@@ -313,9 +313,9 @@ define double @select_fcmp_ord_f64(doubl
 ; SSE-LABEL: select_fcmp_ord_f64:
 ; SSE:       # BB#0:
 ; SSE-NEXT:    cmpordsd %xmm1, %xmm0
-; SSE-NEXT:    andps %xmm0, %xmm2
-; SSE-NEXT:    andnps %xmm3, %xmm0
-; SSE-NEXT:    orps %xmm2, %xmm0
+; SSE-NEXT:    andpd %xmm0, %xmm2
+; SSE-NEXT:    andnpd %xmm3, %xmm0
+; SSE-NEXT:    orpd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_ord_f64:
@@ -367,9 +367,9 @@ define double @select_fcmp_uno_f64(doubl
 ; SSE-LABEL: select_fcmp_uno_f64:
 ; SSE:       # BB#0:
 ; SSE-NEXT:    cmpunordsd %xmm1, %xmm0
-; SSE-NEXT:    andps %xmm0, %xmm2
-; SSE-NEXT:    andnps %xmm3, %xmm0
-; SSE-NEXT:    orps %xmm2, %xmm0
+; SSE-NEXT:    andpd %xmm0, %xmm2
+; SSE-NEXT:    andnpd %xmm3, %xmm0
+; SSE-NEXT:    orpd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_uno_f64:
@@ -421,9 +421,9 @@ define double @select_fcmp_ugt_f64(doubl
 ; SSE-LABEL: select_fcmp_ugt_f64:
 ; SSE:       # BB#0:
 ; SSE-NEXT:    cmpnlesd %xmm1, %xmm0
-; SSE-NEXT:    andps %xmm0, %xmm2
-; SSE-NEXT:    andnps %xmm3, %xmm0
-; SSE-NEXT:    orps %xmm2, %xmm0
+; SSE-NEXT:    andpd %xmm0, %xmm2
+; SSE-NEXT:    andnpd %xmm3, %xmm0
+; SSE-NEXT:    orpd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_ugt_f64:
@@ -475,9 +475,9 @@ define double @select_fcmp_uge_f64(doubl
 ; SSE-LABEL: select_fcmp_uge_f64:
 ; SSE:       # BB#0:
 ; SSE-NEXT:    cmpnltsd %xmm1, %xmm0
-; SSE-NEXT:    andps %xmm0, %xmm2
-; SSE-NEXT:    andnps %xmm3, %xmm0
-; SSE-NEXT:    orps %xmm2, %xmm0
+; SSE-NEXT:    andpd %xmm0, %xmm2
+; SSE-NEXT:    andnpd %xmm3, %xmm0
+; SSE-NEXT:    orpd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_uge_f64:
@@ -530,10 +530,10 @@ define double @select_fcmp_ult_f64(doubl
 ; SSE-LABEL: select_fcmp_ult_f64:
 ; SSE:       # BB#0:
 ; SSE-NEXT:    cmpnlesd %xmm0, %xmm1
-; SSE-NEXT:    andps %xmm1, %xmm2
-; SSE-NEXT:    andnps %xmm3, %xmm1
-; SSE-NEXT:    orps %xmm2, %xmm1
-; SSE-NEXT:    movaps %xmm1, %xmm0
+; SSE-NEXT:    andpd %xmm1, %xmm2
+; SSE-NEXT:    andnpd %xmm3, %xmm1
+; SSE-NEXT:    orpd %xmm2, %xmm1
+; SSE-NEXT:    movapd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_ult_f64:
@@ -586,10 +586,10 @@ define double @select_fcmp_ule_f64(doubl
 ; SSE-LABEL: select_fcmp_ule_f64:
 ; SSE:       # BB#0:
 ; SSE-NEXT:    cmpnltsd %xmm0, %xmm1
-; SSE-NEXT:    andps %xmm1, %xmm2
-; SSE-NEXT:    andnps %xmm3, %xmm1
-; SSE-NEXT:    orps %xmm2, %xmm1
-; SSE-NEXT:    movaps %xmm1, %xmm0
+; SSE-NEXT:    andpd %xmm1, %xmm2
+; SSE-NEXT:    andnpd %xmm3, %xmm1
+; SSE-NEXT:    orpd %xmm2, %xmm1
+; SSE-NEXT:    movapd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_ule_f64:
@@ -641,9 +641,9 @@ define double @select_fcmp_une_f64(doubl
 ; SSE-LABEL: select_fcmp_une_f64:
 ; SSE:       # BB#0:
 ; SSE-NEXT:    cmpneqsd %xmm1, %xmm0
-; SSE-NEXT:    andps %xmm0, %xmm2
-; SSE-NEXT:    andnps %xmm3, %xmm0
-; SSE-NEXT:    orps %xmm2, %xmm0
+; SSE-NEXT:    andpd %xmm0, %xmm2
+; SSE-NEXT:    andnpd %xmm3, %xmm0
+; SSE-NEXT:    orpd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: select_fcmp_une_f64:

Modified: llvm/trunk/test/CodeGen/X86/fp-select-cmp-and.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fp-select-cmp-and.ll?rev=296293&r1=296292&r2=296293&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fp-select-cmp-and.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fp-select-cmp-and.ll Sun Feb 26 00:45:59 2017
@@ -5,7 +5,7 @@ define double @test1(double %a, double %
 ; CHECK-LABEL: test1:
 ; CHECK:       # BB#0:
 ; CHECK-NEXT:    cmpltsd %xmm2, %xmm0
-; CHECK-NEXT:    andps %xmm1, %xmm0
+; CHECK-NEXT:    andpd %xmm1, %xmm0
 ; CHECK-NEXT:    retq
 ;
   %cmp = fcmp olt double %a, %eps
@@ -17,7 +17,7 @@ define double @test2(double %a, double %
 ; CHECK-LABEL: test2:
 ; CHECK:       # BB#0:
 ; CHECK-NEXT:    cmplesd %xmm2, %xmm0
-; CHECK-NEXT:    andps %xmm1, %xmm0
+; CHECK-NEXT:    andpd %xmm1, %xmm0
 ; CHECK-NEXT:    retq
 ;
   %cmp = fcmp ole double %a, %eps
@@ -29,8 +29,8 @@ define double @test3(double %a, double %
 ; CHECK-LABEL: test3:
 ; CHECK:       # BB#0:
 ; CHECK-NEXT:    cmpltsd %xmm0, %xmm2
-; CHECK-NEXT:    andps %xmm1, %xmm2
-; CHECK-NEXT:    movaps %xmm2, %xmm0
+; CHECK-NEXT:    andpd %xmm1, %xmm2
+; CHECK-NEXT:    movapd %xmm2, %xmm0
 ; CHECK-NEXT:    retq
 ;
   %cmp = fcmp ogt double %a, %eps
@@ -42,8 +42,8 @@ define double @test4(double %a, double %
 ; CHECK-LABEL: test4:
 ; CHECK:       # BB#0:
 ; CHECK-NEXT:    cmplesd %xmm0, %xmm2
-; CHECK-NEXT:    andps %xmm1, %xmm2
-; CHECK-NEXT:    movaps %xmm2, %xmm0
+; CHECK-NEXT:    andpd %xmm1, %xmm2
+; CHECK-NEXT:    movapd %xmm2, %xmm0
 ; CHECK-NEXT:    retq
 ;
   %cmp = fcmp oge double %a, %eps
@@ -55,7 +55,7 @@ define double @test5(double %a, double %
 ; CHECK-LABEL: test5:
 ; CHECK:       # BB#0:
 ; CHECK-NEXT:    cmpltsd %xmm2, %xmm0
-; CHECK-NEXT:    andnps %xmm1, %xmm0
+; CHECK-NEXT:    andnpd %xmm1, %xmm0
 ; CHECK-NEXT:    retq
 ;
   %cmp = fcmp olt double %a, %eps
@@ -67,7 +67,7 @@ define double @test6(double %a, double %
 ; CHECK-LABEL: test6:
 ; CHECK:       # BB#0:
 ; CHECK-NEXT:    cmplesd %xmm2, %xmm0
-; CHECK-NEXT:    andnps %xmm1, %xmm0
+; CHECK-NEXT:    andnpd %xmm1, %xmm0
 ; CHECK-NEXT:    retq
 ;
   %cmp = fcmp ole double %a, %eps
@@ -79,8 +79,8 @@ define double @test7(double %a, double %
 ; CHECK-LABEL: test7:
 ; CHECK:       # BB#0:
 ; CHECK-NEXT:    cmpltsd %xmm0, %xmm2
-; CHECK-NEXT:    andnps %xmm1, %xmm2
-; CHECK-NEXT:    movaps %xmm2, %xmm0
+; CHECK-NEXT:    andnpd %xmm1, %xmm2
+; CHECK-NEXT:    movapd %xmm2, %xmm0
 ; CHECK-NEXT:    retq
 ;
   %cmp = fcmp ogt double %a, %eps
@@ -92,8 +92,8 @@ define double @test8(double %a, double %
 ; CHECK-LABEL: test8:
 ; CHECK:       # BB#0:
 ; CHECK-NEXT:    cmplesd %xmm0, %xmm2
-; CHECK-NEXT:    andnps %xmm1, %xmm2
-; CHECK-NEXT:    movaps %xmm2, %xmm0
+; CHECK-NEXT:    andnpd %xmm1, %xmm2
+; CHECK-NEXT:    movapd %xmm2, %xmm0
 ; CHECK-NEXT:    retq
 ;
   %cmp = fcmp oge double %a, %eps
@@ -220,10 +220,10 @@ define double @test18(double %a, double
 ; CHECK-LABEL: test18:
 ; CHECK:       # BB#0:
 ; CHECK-NEXT:    cmplesd %xmm0, %xmm3
-; CHECK-NEXT:    andps %xmm3, %xmm2
-; CHECK-NEXT:    andnps %xmm1, %xmm3
-; CHECK-NEXT:    orps %xmm2, %xmm3
-; CHECK-NEXT:    movaps %xmm3, %xmm0
+; CHECK-NEXT:    andpd %xmm3, %xmm2
+; CHECK-NEXT:    andnpd %xmm1, %xmm3
+; CHECK-NEXT:    orpd %xmm2, %xmm3
+; CHECK-NEXT:    movapd %xmm3, %xmm0
 ; CHECK-NEXT:    retq
 ;
   %cmp = fcmp oge double %a, %eps

Modified: llvm/trunk/test/CodeGen/X86/logical-load-fold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/logical-load-fold.ll?rev=296293&r1=296292&r2=296293&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/logical-load-fold.ll (original)
+++ llvm/trunk/test/CodeGen/X86/logical-load-fold.ll Sun Feb 26 00:45:59 2017
@@ -15,14 +15,14 @@ define double @load_double_no_fold(doubl
 ; SSE2:       # BB#0:
 ; SSE2-NEXT:    cmplesd %xmm0, %xmm1
 ; SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT:    andps %xmm1, %xmm0
+; SSE2-NEXT:    andpd %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: load_double_no_fold:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vcmplesd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vandpd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 
   %cmp = fcmp oge double %x, %y

Modified: llvm/trunk/test/CodeGen/X86/sse-minmax.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-minmax.ll?rev=296293&r1=296292&r2=296293&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-minmax.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-minmax.ll Sun Feb 26 00:45:59 2017
@@ -80,11 +80,11 @@ define double @olt_inverse(double %x, do
 define double @oge(double %x, double %y)  {
 ; STRICT-LABEL: oge:
 ; STRICT:       # BB#0:
-; STRICT-NEXT:    movaps %xmm1, %xmm2
+; STRICT-NEXT:    movapd %xmm1, %xmm2
 ; STRICT-NEXT:    cmplesd %xmm0, %xmm2
-; STRICT-NEXT:    andps %xmm2, %xmm0
-; STRICT-NEXT:    andnps %xmm1, %xmm2
-; STRICT-NEXT:    orps %xmm2, %xmm0
+; STRICT-NEXT:    andpd %xmm2, %xmm0
+; STRICT-NEXT:    andnpd %xmm1, %xmm2
+; STRICT-NEXT:    orpd %xmm2, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: oge:
@@ -99,12 +99,12 @@ define double @oge(double %x, double %y)
 define double @ole(double %x, double %y)  {
 ; STRICT-LABEL: ole:
 ; STRICT:       # BB#0:
-; STRICT-NEXT:    movaps %xmm0, %xmm2
+; STRICT-NEXT:    movapd %xmm0, %xmm2
 ; STRICT-NEXT:    cmplesd %xmm1, %xmm2
-; STRICT-NEXT:    andps %xmm2, %xmm0
-; STRICT-NEXT:    andnps %xmm1, %xmm2
-; STRICT-NEXT:    orps %xmm0, %xmm2
-; STRICT-NEXT:    movaps %xmm2, %xmm0
+; STRICT-NEXT:    andpd %xmm2, %xmm0
+; STRICT-NEXT:    andnpd %xmm1, %xmm2
+; STRICT-NEXT:    orpd %xmm0, %xmm2
+; STRICT-NEXT:    movapd %xmm2, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: ole:
@@ -119,12 +119,12 @@ define double @ole(double %x, double %y)
 define double @oge_inverse(double %x, double %y)  {
 ; STRICT-LABEL: oge_inverse:
 ; STRICT:       # BB#0:
-; STRICT-NEXT:    movaps %xmm1, %xmm2
+; STRICT-NEXT:    movapd %xmm1, %xmm2
 ; STRICT-NEXT:    cmplesd %xmm0, %xmm2
-; STRICT-NEXT:    andps %xmm2, %xmm1
-; STRICT-NEXT:    andnps %xmm0, %xmm2
-; STRICT-NEXT:    orps %xmm1, %xmm2
-; STRICT-NEXT:    movaps %xmm2, %xmm0
+; STRICT-NEXT:    andpd %xmm2, %xmm1
+; STRICT-NEXT:    andnpd %xmm0, %xmm2
+; STRICT-NEXT:    orpd %xmm1, %xmm2
+; STRICT-NEXT:    movapd %xmm2, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: oge_inverse:
@@ -145,12 +145,12 @@ define double @oge_inverse(double %x, do
 define double @ole_inverse(double %x, double %y)  {
 ; STRICT-LABEL: ole_inverse:
 ; STRICT:       # BB#0:
-; STRICT-NEXT:    movaps %xmm0, %xmm2
+; STRICT-NEXT:    movapd %xmm0, %xmm2
 ; STRICT-NEXT:    cmplesd %xmm1, %xmm2
-; STRICT-NEXT:    andps %xmm2, %xmm1
-; STRICT-NEXT:    andnps %xmm0, %xmm2
-; STRICT-NEXT:    orps %xmm1, %xmm2
-; STRICT-NEXT:    movaps %xmm2, %xmm0
+; STRICT-NEXT:    andpd %xmm2, %xmm1
+; STRICT-NEXT:    andnpd %xmm0, %xmm2
+; STRICT-NEXT:    orpd %xmm1, %xmm2
+; STRICT-NEXT:    movapd %xmm2, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: ole_inverse:
@@ -243,9 +243,9 @@ define double @olt_inverse_x(double %x)
 define double @oge_x(double %x)  {
 ; STRICT-LABEL: oge_x:
 ; STRICT:       # BB#0:
-; STRICT-NEXT:    xorps %xmm1, %xmm1
+; STRICT-NEXT:    xorpd %xmm1, %xmm1
 ; STRICT-NEXT:    cmplesd %xmm0, %xmm1
-; STRICT-NEXT:    andps %xmm1, %xmm0
+; STRICT-NEXT:    andpd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: oge_x:
@@ -261,11 +261,11 @@ define double @oge_x(double %x)  {
 define double @ole_x(double %x)  {
 ; STRICT-LABEL: ole_x:
 ; STRICT:       # BB#0:
-; STRICT-NEXT:    xorps %xmm2, %xmm2
-; STRICT-NEXT:    movaps %xmm0, %xmm1
+; STRICT-NEXT:    xorpd %xmm2, %xmm2
+; STRICT-NEXT:    movapd %xmm0, %xmm1
 ; STRICT-NEXT:    cmplesd %xmm2, %xmm1
-; STRICT-NEXT:    andps %xmm0, %xmm1
-; STRICT-NEXT:    movaps %xmm1, %xmm0
+; STRICT-NEXT:    andpd %xmm0, %xmm1
+; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: ole_x:
@@ -281,10 +281,10 @@ define double @ole_x(double %x)  {
 define double @oge_inverse_x(double %x)  {
 ; STRICT-LABEL: oge_inverse_x:
 ; STRICT:       # BB#0:
-; STRICT-NEXT:    xorps %xmm1, %xmm1
+; STRICT-NEXT:    xorpd %xmm1, %xmm1
 ; STRICT-NEXT:    cmplesd %xmm0, %xmm1
-; STRICT-NEXT:    andnps %xmm0, %xmm1
-; STRICT-NEXT:    movaps %xmm1, %xmm0
+; STRICT-NEXT:    andnpd %xmm0, %xmm1
+; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: oge_inverse_x:
@@ -307,11 +307,11 @@ define double @oge_inverse_x(double %x)
 define double @ole_inverse_x(double %x)  {
 ; STRICT-LABEL: ole_inverse_x:
 ; STRICT:       # BB#0:
-; STRICT-NEXT:    xorps %xmm2, %xmm2
-; STRICT-NEXT:    movaps %xmm0, %xmm1
+; STRICT-NEXT:    xorpd %xmm2, %xmm2
+; STRICT-NEXT:    movapd %xmm0, %xmm1
 ; STRICT-NEXT:    cmplesd %xmm2, %xmm1
-; STRICT-NEXT:    andnps %xmm0, %xmm1
-; STRICT-NEXT:    movaps %xmm1, %xmm0
+; STRICT-NEXT:    andnpd %xmm0, %xmm1
+; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: ole_inverse_x:
@@ -334,12 +334,12 @@ define double @ole_inverse_x(double %x)
 define double @ugt(double %x, double %y)  {
 ; STRICT-LABEL: ugt:
 ; STRICT:       # BB#0:
-; STRICT-NEXT:    movaps %xmm0, %xmm2
+; STRICT-NEXT:    movapd %xmm0, %xmm2
 ; STRICT-NEXT:    cmpnlesd %xmm1, %xmm2
-; STRICT-NEXT:    andps %xmm2, %xmm0
-; STRICT-NEXT:    andnps %xmm1, %xmm2
-; STRICT-NEXT:    orps %xmm0, %xmm2
-; STRICT-NEXT:    movaps %xmm2, %xmm0
+; STRICT-NEXT:    andpd %xmm2, %xmm0
+; STRICT-NEXT:    andnpd %xmm1, %xmm2
+; STRICT-NEXT:    orpd %xmm0, %xmm2
+; STRICT-NEXT:    movapd %xmm2, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: ugt:
@@ -354,11 +354,11 @@ define double @ugt(double %x, double %y)
 define double @ult(double %x, double %y)  {
 ; STRICT-LABEL: ult:
 ; STRICT:       # BB#0:
-; STRICT-NEXT:    movaps %xmm1, %xmm2
+; STRICT-NEXT:    movapd %xmm1, %xmm2
 ; STRICT-NEXT:    cmpnlesd %xmm0, %xmm2
-; STRICT-NEXT:    andps %xmm2, %xmm0
-; STRICT-NEXT:    andnps %xmm1, %xmm2
-; STRICT-NEXT:    orps %xmm2, %xmm0
+; STRICT-NEXT:    andpd %xmm2, %xmm0
+; STRICT-NEXT:    andnpd %xmm1, %xmm2
+; STRICT-NEXT:    orpd %xmm2, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: ult:
@@ -373,12 +373,12 @@ define double @ult(double %x, double %y)
 define double @ugt_inverse(double %x, double %y)  {
 ; STRICT-LABEL: ugt_inverse:
 ; STRICT:       # BB#0:
-; STRICT-NEXT:    movaps %xmm0, %xmm2
+; STRICT-NEXT:    movapd %xmm0, %xmm2
 ; STRICT-NEXT:    cmpnlesd %xmm1, %xmm2
-; STRICT-NEXT:    andps %xmm2, %xmm1
-; STRICT-NEXT:    andnps %xmm0, %xmm2
-; STRICT-NEXT:    orps %xmm1, %xmm2
-; STRICT-NEXT:    movaps %xmm2, %xmm0
+; STRICT-NEXT:    andpd %xmm2, %xmm1
+; STRICT-NEXT:    andnpd %xmm0, %xmm2
+; STRICT-NEXT:    orpd %xmm1, %xmm2
+; STRICT-NEXT:    movapd %xmm2, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: ugt_inverse:
@@ -399,12 +399,12 @@ define double @ugt_inverse(double %x, do
 define double @ult_inverse(double %x, double %y)  {
 ; STRICT-LABEL: ult_inverse:
 ; STRICT:       # BB#0:
-; STRICT-NEXT:    movaps %xmm1, %xmm2
+; STRICT-NEXT:    movapd %xmm1, %xmm2
 ; STRICT-NEXT:    cmpnlesd %xmm0, %xmm2
-; STRICT-NEXT:    andps %xmm2, %xmm1
-; STRICT-NEXT:    andnps %xmm0, %xmm2
-; STRICT-NEXT:    orps %xmm1, %xmm2
-; STRICT-NEXT:    movaps %xmm2, %xmm0
+; STRICT-NEXT:    andpd %xmm2, %xmm1
+; STRICT-NEXT:    andnpd %xmm0, %xmm2
+; STRICT-NEXT:    orpd %xmm1, %xmm2
+; STRICT-NEXT:    movapd %xmm2, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: ult_inverse:
@@ -499,11 +499,11 @@ define double @ule_inverse(double %x, do
 define double @ugt_x(double %x)  {
 ; STRICT-LABEL: ugt_x:
 ; STRICT:       # BB#0:
-; STRICT-NEXT:    xorps %xmm2, %xmm2
-; STRICT-NEXT:    movaps %xmm0, %xmm1
+; STRICT-NEXT:    xorpd %xmm2, %xmm2
+; STRICT-NEXT:    movapd %xmm0, %xmm1
 ; STRICT-NEXT:    cmpnlesd %xmm2, %xmm1
-; STRICT-NEXT:    andps %xmm0, %xmm1
-; STRICT-NEXT:    movaps %xmm1, %xmm0
+; STRICT-NEXT:    andpd %xmm0, %xmm1
+; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: ugt_x:
@@ -519,9 +519,9 @@ define double @ugt_x(double %x)  {
 define double @ult_x(double %x)  {
 ; STRICT-LABEL: ult_x:
 ; STRICT:       # BB#0:
-; STRICT-NEXT:    xorps %xmm1, %xmm1
+; STRICT-NEXT:    xorpd %xmm1, %xmm1
 ; STRICT-NEXT:    cmpnlesd %xmm0, %xmm1
-; STRICT-NEXT:    andps %xmm1, %xmm0
+; STRICT-NEXT:    andpd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: ult_x:
@@ -537,11 +537,11 @@ define double @ult_x(double %x)  {
 define double @ugt_inverse_x(double %x)  {
 ; STRICT-LABEL: ugt_inverse_x:
 ; STRICT:       # BB#0:
-; STRICT-NEXT:    xorps %xmm2, %xmm2
-; STRICT-NEXT:    movaps %xmm0, %xmm1
+; STRICT-NEXT:    xorpd %xmm2, %xmm2
+; STRICT-NEXT:    movapd %xmm0, %xmm1
 ; STRICT-NEXT:    cmpnlesd %xmm2, %xmm1
-; STRICT-NEXT:    andnps %xmm0, %xmm1
-; STRICT-NEXT:    movaps %xmm1, %xmm0
+; STRICT-NEXT:    andnpd %xmm0, %xmm1
+; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: ugt_inverse_x:
@@ -564,10 +564,10 @@ define double @ugt_inverse_x(double %x)
 define double @ult_inverse_x(double %x)  {
 ; STRICT-LABEL: ult_inverse_x:
 ; STRICT:       # BB#0:
-; STRICT-NEXT:    xorps %xmm1, %xmm1
+; STRICT-NEXT:    xorpd %xmm1, %xmm1
 ; STRICT-NEXT:    cmpnlesd %xmm0, %xmm1
-; STRICT-NEXT:    andnps %xmm0, %xmm1
-; STRICT-NEXT:    movaps %xmm1, %xmm0
+; STRICT-NEXT:    andnpd %xmm0, %xmm1
+; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: ult_inverse_x:
@@ -743,11 +743,11 @@ define double @oge_y(double %x)  {
 ; STRICT-LABEL: oge_y:
 ; STRICT:       # BB#0:
 ; STRICT-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; STRICT-NEXT:    movaps %xmm1, %xmm2
+; STRICT-NEXT:    movapd %xmm1, %xmm2
 ; STRICT-NEXT:    cmplesd %xmm0, %xmm2
-; STRICT-NEXT:    andps %xmm2, %xmm0
-; STRICT-NEXT:    andnps %xmm1, %xmm2
-; STRICT-NEXT:    orps %xmm2, %xmm0
+; STRICT-NEXT:    andpd %xmm2, %xmm0
+; STRICT-NEXT:    andnpd %xmm1, %xmm2
+; STRICT-NEXT:    orpd %xmm2, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: oge_y:
@@ -763,12 +763,12 @@ define double @ole_y(double %x)  {
 ; STRICT-LABEL: ole_y:
 ; STRICT:       # BB#0:
 ; STRICT-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
-; STRICT-NEXT:    movaps %xmm0, %xmm1
+; STRICT-NEXT:    movapd %xmm0, %xmm1
 ; STRICT-NEXT:    cmplesd %xmm2, %xmm1
-; STRICT-NEXT:    andps %xmm1, %xmm0
-; STRICT-NEXT:    andnps %xmm2, %xmm1
-; STRICT-NEXT:    orps %xmm0, %xmm1
-; STRICT-NEXT:    movaps %xmm1, %xmm0
+; STRICT-NEXT:    andpd %xmm1, %xmm0
+; STRICT-NEXT:    andnpd %xmm2, %xmm1
+; STRICT-NEXT:    orpd %xmm0, %xmm1
+; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: ole_y:
@@ -784,12 +784,12 @@ define double @oge_inverse_y(double %x)
 ; STRICT-LABEL: oge_inverse_y:
 ; STRICT:       # BB#0:
 ; STRICT-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
-; STRICT-NEXT:    movaps %xmm2, %xmm1
+; STRICT-NEXT:    movapd %xmm2, %xmm1
 ; STRICT-NEXT:    cmplesd %xmm0, %xmm1
-; STRICT-NEXT:    andps %xmm1, %xmm2
-; STRICT-NEXT:    andnps %xmm0, %xmm1
-; STRICT-NEXT:    orps %xmm2, %xmm1
-; STRICT-NEXT:    movaps %xmm1, %xmm0
+; STRICT-NEXT:    andpd %xmm1, %xmm2
+; STRICT-NEXT:    andnpd %xmm0, %xmm1
+; STRICT-NEXT:    orpd %xmm2, %xmm1
+; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: oge_inverse_y:
@@ -812,12 +812,12 @@ define double @ole_inverse_y(double %x)
 ; STRICT-LABEL: ole_inverse_y:
 ; STRICT:       # BB#0:
 ; STRICT-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
-; STRICT-NEXT:    movaps %xmm0, %xmm1
+; STRICT-NEXT:    movapd %xmm0, %xmm1
 ; STRICT-NEXT:    cmplesd %xmm2, %xmm1
-; STRICT-NEXT:    andps %xmm1, %xmm2
-; STRICT-NEXT:    andnps %xmm0, %xmm1
-; STRICT-NEXT:    orps %xmm2, %xmm1
-; STRICT-NEXT:    movaps %xmm1, %xmm0
+; STRICT-NEXT:    andpd %xmm1, %xmm2
+; STRICT-NEXT:    andnpd %xmm0, %xmm1
+; STRICT-NEXT:    orpd %xmm2, %xmm1
+; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: ole_inverse_y:
@@ -840,12 +840,12 @@ define double @ugt_y(double %x)  {
 ; STRICT-LABEL: ugt_y:
 ; STRICT:       # BB#0:
 ; STRICT-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
-; STRICT-NEXT:    movaps %xmm0, %xmm1
+; STRICT-NEXT:    movapd %xmm0, %xmm1
 ; STRICT-NEXT:    cmpnlesd %xmm2, %xmm1
-; STRICT-NEXT:    andps %xmm1, %xmm0
-; STRICT-NEXT:    andnps %xmm2, %xmm1
-; STRICT-NEXT:    orps %xmm0, %xmm1
-; STRICT-NEXT:    movaps %xmm1, %xmm0
+; STRICT-NEXT:    andpd %xmm1, %xmm0
+; STRICT-NEXT:    andnpd %xmm2, %xmm1
+; STRICT-NEXT:    orpd %xmm0, %xmm1
+; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: ugt_y:
@@ -861,11 +861,11 @@ define double @ult_y(double %x)  {
 ; STRICT-LABEL: ult_y:
 ; STRICT:       # BB#0:
 ; STRICT-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; STRICT-NEXT:    movaps %xmm1, %xmm2
+; STRICT-NEXT:    movapd %xmm1, %xmm2
 ; STRICT-NEXT:    cmpnlesd %xmm0, %xmm2
-; STRICT-NEXT:    andps %xmm2, %xmm0
-; STRICT-NEXT:    andnps %xmm1, %xmm2
-; STRICT-NEXT:    orps %xmm2, %xmm0
+; STRICT-NEXT:    andpd %xmm2, %xmm0
+; STRICT-NEXT:    andnpd %xmm1, %xmm2
+; STRICT-NEXT:    orpd %xmm2, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; RELAX-LABEL: ult_y:
@@ -881,12 +881,12 @@ define double @ugt_inverse_y(double %x)
 ; STRICT-LABEL: ugt_inverse_y:
 ; STRICT:       # BB#0:
 ; STRICT-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
-; STRICT-NEXT:    movaps %xmm0, %xmm1
+; STRICT-NEXT:    movapd %xmm0, %xmm1
 ; STRICT-NEXT:    cmpnlesd %xmm2, %xmm1
-; STRICT-NEXT:    andps %xmm1, %xmm2
-; STRICT-NEXT:    andnps %xmm0, %xmm1
-; STRICT-NEXT:    orps %xmm2, %xmm1
-; STRICT-NEXT:    movaps %xmm1, %xmm0
+; STRICT-NEXT:    andpd %xmm1, %xmm2
+; STRICT-NEXT:    andnpd %xmm0, %xmm1
+; STRICT-NEXT:    orpd %xmm2, %xmm1
+; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: ugt_inverse_y:
@@ -909,12 +909,12 @@ define double @ult_inverse_y(double %x)
 ; STRICT-LABEL: ult_inverse_y:
 ; STRICT:       # BB#0:
 ; STRICT-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
-; STRICT-NEXT:    movaps %xmm2, %xmm1
+; STRICT-NEXT:    movapd %xmm2, %xmm1
 ; STRICT-NEXT:    cmpnlesd %xmm0, %xmm1
-; STRICT-NEXT:    andps %xmm1, %xmm2
-; STRICT-NEXT:    andnps %xmm0, %xmm1
-; STRICT-NEXT:    orps %xmm2, %xmm1
-; STRICT-NEXT:    movaps %xmm1, %xmm0
+; STRICT-NEXT:    andpd %xmm1, %xmm2
+; STRICT-NEXT:    andnpd %xmm0, %xmm1
+; STRICT-NEXT:    orpd %xmm2, %xmm1
+; STRICT-NEXT:    movapd %xmm1, %xmm0
 ; STRICT-NEXT:    retq
 ;
 ; UNSAFE-LABEL: ult_inverse_y:



