[llvm] b240a29 - [x86] add AVX run to tests of fcmp logic; NFC

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 23 08:21:49 PDT 2021


Author: Sanjay Patel
Date: 2021-09-23T11:21:39-04:00
New Revision: b240a2980b6e3777d6659d993f3ba60a3ca84bcb

URL: https://github.com/llvm/llvm-project/commit/b240a2980b6e3777d6659d993f3ba60a3ca84bcb
DIFF: https://github.com/llvm/llvm-project/commit/b240a2980b6e3777d6659d993f3ba60a3ca84bcb.diff

LOG: [x86] add AVX run to tests of fcmp logic; NFC

The pre-AVX ISA has predicate gaps for both fcmp
codegen alternatives, so a more complicated fix is
required to get ideal asm in all cases.
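
As a minimal illustration of the gaps (a sketch, not part of this
commit): pre-AVX, the compare-into-mask form (cmpss/cmpsd) encodes
only 8 predicate immediates, and the ucomiss/EFLAGS form needs extra
parity-flag logic for predicates such as oeq and une. For example:

    ; 'oeq' cannot be tested with a single setcc after ucomiss, because
    ; ucomiss sets ZF=PF=CF=1 for unordered operands; oeq needs ZF=1
    ; and PF=0, so codegen emits setnp + sete + and, as seen in the
    ; oge_oeq_or_f32 checks below.
    define i1 @oeq_f32(float %a, float %b) {
      %c = fcmp oeq float %a, %b
      ret i1 %c
    }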

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/fcmp-logic.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/fcmp-logic.ll b/llvm/test/CodeGen/X86/fcmp-logic.ll
index 54f7183ef333..80d0b2b2a87c 100644
--- a/llvm/test/CodeGen/X86/fcmp-logic.ll
+++ b/llvm/test/CodeGen/X86/fcmp-logic.ll
@@ -1,15 +1,25 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-- | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-- -mattr=sse2 | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-- -mattr=avx  | FileCheck %s --check-prefixes=AVX
 
 define i1 @olt_ole_and_f32(float %w, float %x, float %y, float %z) {
-; CHECK-LABEL: olt_ole_and_f32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    ucomiss %xmm0, %xmm1
-; CHECK-NEXT:    seta %cl
-; CHECK-NEXT:    ucomiss %xmm2, %xmm3
-; CHECK-NEXT:    setae %al
-; CHECK-NEXT:    andb %cl, %al
-; CHECK-NEXT:    retq
+; SSE-LABEL: olt_ole_and_f32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    ucomiss %xmm0, %xmm1
+; SSE-NEXT:    seta %cl
+; SSE-NEXT:    ucomiss %xmm2, %xmm3
+; SSE-NEXT:    setae %al
+; SSE-NEXT:    andb %cl, %al
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: olt_ole_and_f32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vucomiss %xmm0, %xmm1
+; AVX-NEXT:    seta %cl
+; AVX-NEXT:    vucomiss %xmm2, %xmm3
+; AVX-NEXT:    setae %al
+; AVX-NEXT:    andb %cl, %al
+; AVX-NEXT:    retq
   %f1 = fcmp olt float %w, %x
   %f2 = fcmp ole float %y, %z
   %r = and i1 %f1, %f2
@@ -17,16 +27,27 @@ define i1 @olt_ole_and_f32(float %w, float %x, float %y, float %z) {
 }
 
 define i1 @oge_oeq_or_f32(float %w, float %x, float %y, float %z) {
-; CHECK-LABEL: oge_oeq_or_f32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    ucomiss %xmm1, %xmm0
-; CHECK-NEXT:    setae %cl
-; CHECK-NEXT:    ucomiss %xmm3, %xmm2
-; CHECK-NEXT:    setnp %dl
-; CHECK-NEXT:    sete %al
-; CHECK-NEXT:    andb %dl, %al
-; CHECK-NEXT:    orb %cl, %al
-; CHECK-NEXT:    retq
+; SSE-LABEL: oge_oeq_or_f32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    ucomiss %xmm1, %xmm0
+; SSE-NEXT:    setae %cl
+; SSE-NEXT:    ucomiss %xmm3, %xmm2
+; SSE-NEXT:    setnp %dl
+; SSE-NEXT:    sete %al
+; SSE-NEXT:    andb %dl, %al
+; SSE-NEXT:    orb %cl, %al
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: oge_oeq_or_f32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vucomiss %xmm1, %xmm0
+; AVX-NEXT:    setae %cl
+; AVX-NEXT:    vucomiss %xmm3, %xmm2
+; AVX-NEXT:    setnp %dl
+; AVX-NEXT:    sete %al
+; AVX-NEXT:    andb %dl, %al
+; AVX-NEXT:    orb %cl, %al
+; AVX-NEXT:    retq
   %f1 = fcmp oge float %w, %x
   %f2 = fcmp oeq float %y, %z
   %r = or i1 %f1, %f2
@@ -34,14 +55,23 @@ define i1 @oge_oeq_or_f32(float %w, float %x, float %y, float %z) {
 }
 
 define i1 @ord_one_xor_f32(float %w, float %x, float %y, float %z) {
-; CHECK-LABEL: ord_one_xor_f32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    ucomiss %xmm1, %xmm0
-; CHECK-NEXT:    setnp %cl
-; CHECK-NEXT:    ucomiss %xmm3, %xmm2
-; CHECK-NEXT:    setne %al
-; CHECK-NEXT:    xorb %cl, %al
-; CHECK-NEXT:    retq
+; SSE-LABEL: ord_one_xor_f32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    ucomiss %xmm1, %xmm0
+; SSE-NEXT:    setnp %cl
+; SSE-NEXT:    ucomiss %xmm3, %xmm2
+; SSE-NEXT:    setne %al
+; SSE-NEXT:    xorb %cl, %al
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: ord_one_xor_f32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vucomiss %xmm1, %xmm0
+; AVX-NEXT:    setnp %cl
+; AVX-NEXT:    vucomiss %xmm3, %xmm2
+; AVX-NEXT:    setne %al
+; AVX-NEXT:    xorb %cl, %al
+; AVX-NEXT:    retq
   %f1 = fcmp ord float %w, %x
   %f2 = fcmp one float %y, %z
   %r = xor i1 %f1, %f2
@@ -49,16 +79,27 @@ define i1 @ord_one_xor_f32(float %w, float %x, float %y, float %z) {
 }
 
 define i1 @une_ugt_and_f64(double %w, double %x, double %y, double %z) {
-; CHECK-LABEL: une_ugt_and_f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    ucomisd %xmm1, %xmm0
-; CHECK-NEXT:    setp %al
-; CHECK-NEXT:    setne %cl
-; CHECK-NEXT:    orb %al, %cl
-; CHECK-NEXT:    ucomisd %xmm2, %xmm3
-; CHECK-NEXT:    setb %al
-; CHECK-NEXT:    andb %cl, %al
-; CHECK-NEXT:    retq
+; SSE-LABEL: une_ugt_and_f64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    ucomisd %xmm1, %xmm0
+; SSE-NEXT:    setp %al
+; SSE-NEXT:    setne %cl
+; SSE-NEXT:    orb %al, %cl
+; SSE-NEXT:    ucomisd %xmm2, %xmm3
+; SSE-NEXT:    setb %al
+; SSE-NEXT:    andb %cl, %al
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: une_ugt_and_f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vucomisd %xmm1, %xmm0
+; AVX-NEXT:    setp %al
+; AVX-NEXT:    setne %cl
+; AVX-NEXT:    orb %al, %cl
+; AVX-NEXT:    vucomisd %xmm2, %xmm3
+; AVX-NEXT:    setb %al
+; AVX-NEXT:    andb %cl, %al
+; AVX-NEXT:    retq
   %f1 = fcmp une double %w, %x
   %f2 = fcmp ugt double %y, %z
   %r = and i1 %f1, %f2
@@ -66,14 +107,23 @@ define i1 @une_ugt_and_f64(double %w, double %x, double %y, double %z) {
 }
 
 define i1 @ult_uge_or_f64(double %w, double %x, double %y, double %z) {
-; CHECK-LABEL: ult_uge_or_f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    ucomisd %xmm1, %xmm0
-; CHECK-NEXT:    setb %cl
-; CHECK-NEXT:    ucomisd %xmm2, %xmm3
-; CHECK-NEXT:    setbe %al
-; CHECK-NEXT:    orb %cl, %al
-; CHECK-NEXT:    retq
+; SSE-LABEL: ult_uge_or_f64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    ucomisd %xmm1, %xmm0
+; SSE-NEXT:    setb %cl
+; SSE-NEXT:    ucomisd %xmm2, %xmm3
+; SSE-NEXT:    setbe %al
+; SSE-NEXT:    orb %cl, %al
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: ult_uge_or_f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vucomisd %xmm1, %xmm0
+; AVX-NEXT:    setb %cl
+; AVX-NEXT:    vucomisd %xmm2, %xmm3
+; AVX-NEXT:    setbe %al
+; AVX-NEXT:    orb %cl, %al
+; AVX-NEXT:    retq
   %f1 = fcmp ult double %w, %x
   %f2 = fcmp uge double %y, %z
   %r = or i1 %f1, %f2
@@ -81,16 +131,27 @@ define i1 @ult_uge_or_f64(double %w, double %x, double %y, double %z) {
 }
 
 define i1 @une_uno_xor_f64(double %w, double %x, double %y, double %z) {
-; CHECK-LABEL: une_uno_xor_f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    ucomisd %xmm1, %xmm0
-; CHECK-NEXT:    setp %al
-; CHECK-NEXT:    setne %cl
-; CHECK-NEXT:    orb %al, %cl
-; CHECK-NEXT:    ucomisd %xmm3, %xmm2
-; CHECK-NEXT:    setp %al
-; CHECK-NEXT:    xorb %cl, %al
-; CHECK-NEXT:    retq
+; SSE-LABEL: une_uno_xor_f64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    ucomisd %xmm1, %xmm0
+; SSE-NEXT:    setp %al
+; SSE-NEXT:    setne %cl
+; SSE-NEXT:    orb %al, %cl
+; SSE-NEXT:    ucomisd %xmm3, %xmm2
+; SSE-NEXT:    setp %al
+; SSE-NEXT:    xorb %cl, %al
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: une_uno_xor_f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vucomisd %xmm1, %xmm0
+; AVX-NEXT:    setp %al
+; AVX-NEXT:    setne %cl
+; AVX-NEXT:    orb %al, %cl
+; AVX-NEXT:    vucomisd %xmm3, %xmm2
+; AVX-NEXT:    setp %al
+; AVX-NEXT:    xorb %cl, %al
+; AVX-NEXT:    retq
   %f1 = fcmp une double %w, %x
   %f2 = fcmp uno double %y, %z
   %r = xor i1 %f1, %f2
@@ -98,14 +159,23 @@ define i1 @une_uno_xor_f64(double %w, double %x, double %y, double %z) {
 }
 
 define i1 @olt_olt_and_f32_f64(float %w, float %x, double %y, double %z) {
-; CHECK-LABEL: olt_olt_and_f32_f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    ucomiss %xmm0, %xmm1
-; CHECK-NEXT:    seta %cl
-; CHECK-NEXT:    ucomisd %xmm2, %xmm3
-; CHECK-NEXT:    seta %al
-; CHECK-NEXT:    andb %cl, %al
-; CHECK-NEXT:    retq
+; SSE-LABEL: olt_olt_and_f32_f64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    ucomiss %xmm0, %xmm1
+; SSE-NEXT:    seta %cl
+; SSE-NEXT:    ucomisd %xmm2, %xmm3
+; SSE-NEXT:    seta %al
+; SSE-NEXT:    andb %cl, %al
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: olt_olt_and_f32_f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vucomiss %xmm0, %xmm1
+; AVX-NEXT:    seta %cl
+; AVX-NEXT:    vucomisd %xmm2, %xmm3
+; AVX-NEXT:    seta %al
+; AVX-NEXT:    andb %cl, %al
+; AVX-NEXT:    retq
   %f1 = fcmp olt float %w, %x
   %f2 = fcmp olt double %y, %z
   %r = and i1 %f1, %f2
@@ -113,17 +183,29 @@ define i1 @olt_olt_and_f32_f64(float %w, float %x, double %y, double %z) {
 }
 
 define i1 @une_uno_xor_f64_use1(double %w, double %x, double %y, double %z, i1* %p) {
-; CHECK-LABEL: une_uno_xor_f64_use1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    ucomisd %xmm1, %xmm0
-; CHECK-NEXT:    setp %al
-; CHECK-NEXT:    setne %cl
-; CHECK-NEXT:    orb %al, %cl
-; CHECK-NEXT:    movb %cl, (%rdi)
-; CHECK-NEXT:    ucomisd %xmm3, %xmm2
-; CHECK-NEXT:    setp %al
-; CHECK-NEXT:    xorb %cl, %al
-; CHECK-NEXT:    retq
+; SSE-LABEL: une_uno_xor_f64_use1:
+; SSE:       # %bb.0:
+; SSE-NEXT:    ucomisd %xmm1, %xmm0
+; SSE-NEXT:    setp %al
+; SSE-NEXT:    setne %cl
+; SSE-NEXT:    orb %al, %cl
+; SSE-NEXT:    movb %cl, (%rdi)
+; SSE-NEXT:    ucomisd %xmm3, %xmm2
+; SSE-NEXT:    setp %al
+; SSE-NEXT:    xorb %cl, %al
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: une_uno_xor_f64_use1:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vucomisd %xmm1, %xmm0
+; AVX-NEXT:    setp %al
+; AVX-NEXT:    setne %cl
+; AVX-NEXT:    orb %al, %cl
+; AVX-NEXT:    movb %cl, (%rdi)
+; AVX-NEXT:    vucomisd %xmm3, %xmm2
+; AVX-NEXT:    setp %al
+; AVX-NEXT:    xorb %cl, %al
+; AVX-NEXT:    retq
   %f1 = fcmp une double %w, %x
   store i1 %f1, i1* %p
   %f2 = fcmp uno double %y, %z
@@ -132,17 +214,29 @@ define i1 @une_uno_xor_f64_use1(double %w, double %x, double %y, double %z, i1*
 }
 
 define i1 @une_uno_xor_f64_use2(double %w, double %x, double %y, double %z, i1* %p) {
-; CHECK-LABEL: une_uno_xor_f64_use2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    ucomisd %xmm1, %xmm0
-; CHECK-NEXT:    setp %al
-; CHECK-NEXT:    setne %cl
-; CHECK-NEXT:    orb %al, %cl
-; CHECK-NEXT:    ucomisd %xmm3, %xmm2
-; CHECK-NEXT:    setp %al
-; CHECK-NEXT:    setp (%rdi)
-; CHECK-NEXT:    xorb %cl, %al
-; CHECK-NEXT:    retq
+; SSE-LABEL: une_uno_xor_f64_use2:
+; SSE:       # %bb.0:
+; SSE-NEXT:    ucomisd %xmm1, %xmm0
+; SSE-NEXT:    setp %al
+; SSE-NEXT:    setne %cl
+; SSE-NEXT:    orb %al, %cl
+; SSE-NEXT:    ucomisd %xmm3, %xmm2
+; SSE-NEXT:    setp %al
+; SSE-NEXT:    setp (%rdi)
+; SSE-NEXT:    xorb %cl, %al
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: une_uno_xor_f64_use2:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vucomisd %xmm1, %xmm0
+; AVX-NEXT:    setp %al
+; AVX-NEXT:    setne %cl
+; AVX-NEXT:    orb %al, %cl
+; AVX-NEXT:    vucomisd %xmm3, %xmm2
+; AVX-NEXT:    setp %al
+; AVX-NEXT:    setp (%rdi)
+; AVX-NEXT:    xorb %cl, %al
+; AVX-NEXT:    retq
   %f1 = fcmp une double %w, %x
   %f2 = fcmp uno double %y, %z
   store i1 %f2, i1* %p
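
The new AVX output can be reproduced with an llc invocation matching
the added RUN line, and the checks regenerated with the script named
in the test's NOTE line (a sketch, assuming a built llc is on PATH
where the update script can find it):

    llc < llvm/test/CodeGen/X86/fcmp-logic.ll -mtriple=x86_64-- -mattr=avx
    llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/X86/fcmp-logic.ll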



