[llvm] 0ec79f4 - [X86] Regenerate sqrt-fastmath-mir.ll
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Tue May 30 09:22:08 PDT 2023
Author: Simon Pilgrim
Date: 2023-05-30T17:21:53+01:00
New Revision: 0ec79f413e3a292063ca047b520b5b9b592cdc0c
URL: https://github.com/llvm/llvm-project/commit/0ec79f413e3a292063ca047b520b5b9b592cdc0c
DIFF: https://github.com/llvm/llvm-project/commit/0ec79f413e3a292063ca047b520b5b9b592cdc0c.diff
LOG: [X86] Regenerate sqrt-fastmath-mir.ll
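The regenerated assertions below use the CHECK-NEXT / named-FileCheck-variable format emitted by LLVM's check-update scripts; a plausible (assumed) way to reproduce this kind of regeneration, with a built llc on PATH, is:

    llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll

The exact script and llc binary used are assumptions; the commit itself only records the regenerated check lines.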
Added:
Modified:
llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll b/llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll
index d3715f2eac164..8a7fea78702d8 100644
--- a/llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll
+++ b/llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll
@@ -6,12 +6,13 @@ declare float @llvm.sqrt.f32(float) #2
define float @sqrt_ieee(float %f) #0 {
; CHECK-LABEL: name: sqrt_ieee
; CHECK: bb.0 (%ir-block.0):
- ; CHECK: liveins: $xmm0
- ; CHECK: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
- ; CHECK: [[DEF:%[0-9]+]]:fr32 = IMPLICIT_DEF
- ; CHECK: %1:fr32 = nofpexcept VSQRTSSr killed [[DEF]], [[COPY]], implicit $mxcsr
- ; CHECK: $xmm0 = COPY %1
- ; CHECK: RET 0, $xmm0
+ ; CHECK-NEXT: liveins: $xmm0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:fr32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[VSQRTSSr:%[0-9]+]]:fr32 = nofpexcept VSQRTSSr killed [[DEF]], [[COPY]], implicit $mxcsr
+ ; CHECK-NEXT: $xmm0 = COPY [[VSQRTSSr]]
+ ; CHECK-NEXT: RET 0, $xmm0
%call = tail call float @llvm.sqrt.f32(float %f)
ret float %call
}
@@ -19,31 +20,32 @@ define float @sqrt_ieee(float %f) #0 {
define float @sqrt_ieee_ninf(float %f) #0 {
; CHECK-LABEL: name: sqrt_ieee_ninf
; CHECK: bb.0 (%ir-block.0):
- ; CHECK: liveins: $xmm0
- ; CHECK: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
- ; CHECK: [[DEF:%[0-9]+]]:fr32 = IMPLICIT_DEF
- ; CHECK: [[VRSQRTSSr:%[0-9]+]]:fr32 = VRSQRTSSr killed [[DEF]], [[COPY]]
- ; CHECK: %3:fr32 = ninf afn nofpexcept VMULSSrr [[COPY]], [[VRSQRTSSr]], implicit $mxcsr
- ; CHECK: [[VMOVSSrm_alt:%[0-9]+]]:fr32 = VMOVSSrm_alt $rip, 1, $noreg, %const.0, $noreg :: (load (s32) from constant-pool)
- ; CHECK: %5:fr32 = ninf afn nofpexcept VFMADD213SSr [[VRSQRTSSr]], killed %3, [[VMOVSSrm_alt]], implicit $mxcsr
- ; CHECK: [[VMOVSSrm_alt1:%[0-9]+]]:fr32 = VMOVSSrm_alt $rip, 1, $noreg, %const.1, $noreg :: (load (s32) from constant-pool)
- ; CHECK: %7:fr32 = ninf afn nofpexcept VMULSSrr [[VRSQRTSSr]], [[VMOVSSrm_alt1]], implicit $mxcsr
- ; CHECK: %8:fr32 = ninf afn nofpexcept VMULSSrr killed %7, killed %5, implicit $mxcsr
- ; CHECK: %9:fr32 = ninf afn nofpexcept VMULSSrr [[COPY]], %8, implicit $mxcsr
- ; CHECK: %10:fr32 = ninf afn nofpexcept VFMADD213SSr %8, %9, [[VMOVSSrm_alt]], implicit $mxcsr
- ; CHECK: %11:fr32 = ninf afn nofpexcept VMULSSrr %9, [[VMOVSSrm_alt1]], implicit $mxcsr
- ; CHECK: %12:fr32 = ninf afn nofpexcept VMULSSrr killed %11, killed %10, implicit $mxcsr
- ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY %12
- ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY]]
- ; CHECK: [[VPBROADCASTDrm:%[0-9]+]]:vr128 = VPBROADCASTDrm $rip, 1, $noreg, %const.2, $noreg :: (load (s32) from constant-pool)
- ; CHECK: [[VPANDrr:%[0-9]+]]:vr128 = VPANDrr killed [[COPY2]], killed [[VPBROADCASTDrm]]
- ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[VPANDrr]]
- ; CHECK: %18:fr32 = nofpexcept VCMPSSrm killed [[COPY3]], $rip, 1, $noreg, %const.3, $noreg, 1, implicit $mxcsr :: (load (s32) from constant-pool)
- ; CHECK: [[COPY4:%[0-9]+]]:vr128 = COPY %18
- ; CHECK: [[VPANDNrr:%[0-9]+]]:vr128 = VPANDNrr killed [[COPY4]], killed [[COPY1]]
- ; CHECK: [[COPY5:%[0-9]+]]:fr32 = COPY [[VPANDNrr]]
- ; CHECK: $xmm0 = COPY [[COPY5]]
- ; CHECK: RET 0, $xmm0
+ ; CHECK-NEXT: liveins: $xmm0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:fr32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[VRSQRTSSr:%[0-9]+]]:fr32 = VRSQRTSSr killed [[DEF]], [[COPY]]
+ ; CHECK-NEXT: [[VMULSSrr:%[0-9]+]]:fr32 = ninf afn nofpexcept VMULSSrr [[COPY]], [[VRSQRTSSr]], implicit $mxcsr
+ ; CHECK-NEXT: [[VMOVSSrm_alt:%[0-9]+]]:fr32 = VMOVSSrm_alt $rip, 1, $noreg, %const.0, $noreg :: (load (s32) from constant-pool)
+ ; CHECK-NEXT: [[VFMADD213SSr:%[0-9]+]]:fr32 = ninf afn nofpexcept VFMADD213SSr [[VRSQRTSSr]], killed [[VMULSSrr]], [[VMOVSSrm_alt]], implicit $mxcsr
+ ; CHECK-NEXT: [[VMOVSSrm_alt1:%[0-9]+]]:fr32 = VMOVSSrm_alt $rip, 1, $noreg, %const.1, $noreg :: (load (s32) from constant-pool)
+ ; CHECK-NEXT: [[VMULSSrr1:%[0-9]+]]:fr32 = ninf afn nofpexcept VMULSSrr [[VRSQRTSSr]], [[VMOVSSrm_alt1]], implicit $mxcsr
+ ; CHECK-NEXT: [[VMULSSrr2:%[0-9]+]]:fr32 = ninf afn nofpexcept VMULSSrr killed [[VMULSSrr1]], killed [[VFMADD213SSr]], implicit $mxcsr
+ ; CHECK-NEXT: [[VMULSSrr3:%[0-9]+]]:fr32 = ninf afn nofpexcept VMULSSrr [[COPY]], [[VMULSSrr2]], implicit $mxcsr
+ ; CHECK-NEXT: [[VFMADD213SSr1:%[0-9]+]]:fr32 = ninf afn nofpexcept VFMADD213SSr [[VMULSSrr2]], [[VMULSSrr3]], [[VMOVSSrm_alt]], implicit $mxcsr
+ ; CHECK-NEXT: [[VMULSSrr4:%[0-9]+]]:fr32 = ninf afn nofpexcept VMULSSrr [[VMULSSrr3]], [[VMOVSSrm_alt1]], implicit $mxcsr
+ ; CHECK-NEXT: [[VMULSSrr5:%[0-9]+]]:fr32 = ninf afn nofpexcept VMULSSrr killed [[VMULSSrr4]], killed [[VFMADD213SSr1]], implicit $mxcsr
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr128 = COPY [[VMULSSrr5]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY]]
+ ; CHECK-NEXT: [[VPBROADCASTDrm:%[0-9]+]]:vr128 = VPBROADCASTDrm $rip, 1, $noreg, %const.2, $noreg :: (load (s32) from constant-pool)
+ ; CHECK-NEXT: [[VPANDrr:%[0-9]+]]:vr128 = VPANDrr killed [[COPY2]], killed [[VPBROADCASTDrm]]
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:fr32 = COPY [[VPANDrr]]
+ ; CHECK-NEXT: [[VCMPSSrm:%[0-9]+]]:fr32 = nofpexcept VCMPSSrm killed [[COPY3]], $rip, 1, $noreg, %const.3, $noreg, 1, implicit $mxcsr :: (load (s32) from constant-pool)
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vr128 = COPY [[VCMPSSrm]]
+ ; CHECK-NEXT: [[VPANDNrr:%[0-9]+]]:vr128 = VPANDNrr killed [[COPY4]], killed [[COPY1]]
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:fr32 = COPY [[VPANDNrr]]
+ ; CHECK-NEXT: $xmm0 = COPY [[COPY5]]
+ ; CHECK-NEXT: RET 0, $xmm0
%call = tail call ninf afn float @llvm.sqrt.f32(float %f)
ret float %call
}
@@ -51,12 +53,13 @@ define float @sqrt_ieee_ninf(float %f) #0 {
define float @sqrt_daz(float %f) #1 {
; CHECK-LABEL: name: sqrt_daz
; CHECK: bb.0 (%ir-block.0):
- ; CHECK: liveins: $xmm0
- ; CHECK: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
- ; CHECK: [[DEF:%[0-9]+]]:fr32 = IMPLICIT_DEF
- ; CHECK: %1:fr32 = nofpexcept VSQRTSSr killed [[DEF]], [[COPY]], implicit $mxcsr
- ; CHECK: $xmm0 = COPY %1
- ; CHECK: RET 0, $xmm0
+ ; CHECK-NEXT: liveins: $xmm0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:fr32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[VSQRTSSr:%[0-9]+]]:fr32 = nofpexcept VSQRTSSr killed [[DEF]], [[COPY]], implicit $mxcsr
+ ; CHECK-NEXT: $xmm0 = COPY [[VSQRTSSr]]
+ ; CHECK-NEXT: RET 0, $xmm0
%call = tail call float @llvm.sqrt.f32(float %f)
ret float %call
}
@@ -64,28 +67,29 @@ define float @sqrt_daz(float %f) #1 {
define float @sqrt_daz_ninf(float %f) #1 {
; CHECK-LABEL: name: sqrt_daz_ninf
; CHECK: bb.0 (%ir-block.0):
- ; CHECK: liveins: $xmm0
- ; CHECK: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
- ; CHECK: [[DEF:%[0-9]+]]:fr32 = IMPLICIT_DEF
- ; CHECK: [[VRSQRTSSr:%[0-9]+]]:fr32 = VRSQRTSSr killed [[DEF]], [[COPY]]
- ; CHECK: %3:fr32 = ninf afn nofpexcept VMULSSrr [[COPY]], [[VRSQRTSSr]], implicit $mxcsr
- ; CHECK: [[VMOVSSrm_alt:%[0-9]+]]:fr32 = VMOVSSrm_alt $rip, 1, $noreg, %const.0, $noreg :: (load (s32) from constant-pool)
- ; CHECK: %5:fr32 = ninf afn nofpexcept VFMADD213SSr [[VRSQRTSSr]], killed %3, [[VMOVSSrm_alt]], implicit $mxcsr
- ; CHECK: [[VMOVSSrm_alt1:%[0-9]+]]:fr32 = VMOVSSrm_alt $rip, 1, $noreg, %const.1, $noreg :: (load (s32) from constant-pool)
- ; CHECK: %7:fr32 = ninf afn nofpexcept VMULSSrr [[VRSQRTSSr]], [[VMOVSSrm_alt1]], implicit $mxcsr
- ; CHECK: %8:fr32 = ninf afn nofpexcept VMULSSrr killed %7, killed %5, implicit $mxcsr
- ; CHECK: %9:fr32 = ninf afn nofpexcept VMULSSrr [[COPY]], %8, implicit $mxcsr
- ; CHECK: %10:fr32 = ninf afn nofpexcept VFMADD213SSr %8, %9, [[VMOVSSrm_alt]], implicit $mxcsr
- ; CHECK: %11:fr32 = ninf afn nofpexcept VMULSSrr %9, [[VMOVSSrm_alt1]], implicit $mxcsr
- ; CHECK: %12:fr32 = ninf afn nofpexcept VMULSSrr killed %11, killed %10, implicit $mxcsr
- ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY %12
- ; CHECK: [[FsFLD0SS:%[0-9]+]]:fr32 = FsFLD0SS
- ; CHECK: %15:fr32 = nofpexcept VCMPSSrr [[COPY]], killed [[FsFLD0SS]], 0, implicit $mxcsr
- ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY %15
- ; CHECK: [[VPANDNrr:%[0-9]+]]:vr128 = VPANDNrr killed [[COPY2]], killed [[COPY1]]
- ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[VPANDNrr]]
- ; CHECK: $xmm0 = COPY [[COPY3]]
- ; CHECK: RET 0, $xmm0
+ ; CHECK-NEXT: liveins: $xmm0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:fr32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[VRSQRTSSr:%[0-9]+]]:fr32 = VRSQRTSSr killed [[DEF]], [[COPY]]
+ ; CHECK-NEXT: [[VMULSSrr:%[0-9]+]]:fr32 = ninf afn nofpexcept VMULSSrr [[COPY]], [[VRSQRTSSr]], implicit $mxcsr
+ ; CHECK-NEXT: [[VMOVSSrm_alt:%[0-9]+]]:fr32 = VMOVSSrm_alt $rip, 1, $noreg, %const.0, $noreg :: (load (s32) from constant-pool)
+ ; CHECK-NEXT: [[VFMADD213SSr:%[0-9]+]]:fr32 = ninf afn nofpexcept VFMADD213SSr [[VRSQRTSSr]], killed [[VMULSSrr]], [[VMOVSSrm_alt]], implicit $mxcsr
+ ; CHECK-NEXT: [[VMOVSSrm_alt1:%[0-9]+]]:fr32 = VMOVSSrm_alt $rip, 1, $noreg, %const.1, $noreg :: (load (s32) from constant-pool)
+ ; CHECK-NEXT: [[VMULSSrr1:%[0-9]+]]:fr32 = ninf afn nofpexcept VMULSSrr [[VRSQRTSSr]], [[VMOVSSrm_alt1]], implicit $mxcsr
+ ; CHECK-NEXT: [[VMULSSrr2:%[0-9]+]]:fr32 = ninf afn nofpexcept VMULSSrr killed [[VMULSSrr1]], killed [[VFMADD213SSr]], implicit $mxcsr
+ ; CHECK-NEXT: [[VMULSSrr3:%[0-9]+]]:fr32 = ninf afn nofpexcept VMULSSrr [[COPY]], [[VMULSSrr2]], implicit $mxcsr
+ ; CHECK-NEXT: [[VFMADD213SSr1:%[0-9]+]]:fr32 = ninf afn nofpexcept VFMADD213SSr [[VMULSSrr2]], [[VMULSSrr3]], [[VMOVSSrm_alt]], implicit $mxcsr
+ ; CHECK-NEXT: [[VMULSSrr4:%[0-9]+]]:fr32 = ninf afn nofpexcept VMULSSrr [[VMULSSrr3]], [[VMOVSSrm_alt1]], implicit $mxcsr
+ ; CHECK-NEXT: [[VMULSSrr5:%[0-9]+]]:fr32 = ninf afn nofpexcept VMULSSrr killed [[VMULSSrr4]], killed [[VFMADD213SSr1]], implicit $mxcsr
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr128 = COPY [[VMULSSrr5]]
+ ; CHECK-NEXT: [[FsFLD0SS:%[0-9]+]]:fr32 = FsFLD0SS
+ ; CHECK-NEXT: [[VCMPSSrr:%[0-9]+]]:fr32 = nofpexcept VCMPSSrr [[COPY]], killed [[FsFLD0SS]], 0, implicit $mxcsr
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr128 = COPY [[VCMPSSrr]]
+ ; CHECK-NEXT: [[VPANDNrr:%[0-9]+]]:vr128 = VPANDNrr killed [[COPY2]], killed [[COPY1]]
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:fr32 = COPY [[VPANDNrr]]
+ ; CHECK-NEXT: $xmm0 = COPY [[COPY3]]
+ ; CHECK-NEXT: RET 0, $xmm0
%call = tail call ninf afn float @llvm.sqrt.f32(float %f)
ret float %call
}
@@ -93,22 +97,23 @@ define float @sqrt_daz_ninf(float %f) #1 {
define float @rsqrt_ieee(float %f) #0 {
; CHECK-LABEL: name: rsqrt_ieee
; CHECK: bb.0 (%ir-block.0):
- ; CHECK: liveins: $xmm0
- ; CHECK: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
- ; CHECK: [[DEF:%[0-9]+]]:fr32 = IMPLICIT_DEF
- ; CHECK: [[VRSQRTSSr:%[0-9]+]]:fr32 = nnan ninf nsz arcp contract afn reassoc VRSQRTSSr killed [[DEF]], [[COPY]]
- ; CHECK: %3:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VMULSSrr [[COPY]], [[VRSQRTSSr]], implicit $mxcsr
- ; CHECK: [[VMOVSSrm_alt:%[0-9]+]]:fr32 = VMOVSSrm_alt $rip, 1, $noreg, %const.0, $noreg :: (load (s32) from constant-pool)
- ; CHECK: %5:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VFMADD213SSr [[VRSQRTSSr]], killed %3, [[VMOVSSrm_alt]], implicit $mxcsr
- ; CHECK: [[VMOVSSrm_alt1:%[0-9]+]]:fr32 = VMOVSSrm_alt $rip, 1, $noreg, %const.1, $noreg :: (load (s32) from constant-pool)
- ; CHECK: %7:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VMULSSrr [[VRSQRTSSr]], [[VMOVSSrm_alt1]], implicit $mxcsr
- ; CHECK: %8:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VMULSSrr killed %7, killed %5, implicit $mxcsr
- ; CHECK: %9:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VMULSSrr [[COPY]], %8, implicit $mxcsr
- ; CHECK: %10:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VFMADD213SSr %8, killed %9, [[VMOVSSrm_alt]], implicit $mxcsr
- ; CHECK: %11:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VMULSSrr %8, [[VMOVSSrm_alt1]], implicit $mxcsr
- ; CHECK: %12:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VMULSSrr killed %11, killed %10, implicit $mxcsr
- ; CHECK: $xmm0 = COPY %12
- ; CHECK: RET 0, $xmm0
+ ; CHECK-NEXT: liveins: $xmm0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:fr32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[VRSQRTSSr:%[0-9]+]]:fr32 = nnan ninf nsz arcp contract afn reassoc VRSQRTSSr killed [[DEF]], [[COPY]]
+ ; CHECK-NEXT: [[VMULSSrr:%[0-9]+]]:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VMULSSrr [[COPY]], [[VRSQRTSSr]], implicit $mxcsr
+ ; CHECK-NEXT: [[VMOVSSrm_alt:%[0-9]+]]:fr32 = VMOVSSrm_alt $rip, 1, $noreg, %const.0, $noreg :: (load (s32) from constant-pool)
+ ; CHECK-NEXT: [[VFMADD213SSr:%[0-9]+]]:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VFMADD213SSr [[VRSQRTSSr]], killed [[VMULSSrr]], [[VMOVSSrm_alt]], implicit $mxcsr
+ ; CHECK-NEXT: [[VMOVSSrm_alt1:%[0-9]+]]:fr32 = VMOVSSrm_alt $rip, 1, $noreg, %const.1, $noreg :: (load (s32) from constant-pool)
+ ; CHECK-NEXT: [[VMULSSrr1:%[0-9]+]]:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VMULSSrr [[VRSQRTSSr]], [[VMOVSSrm_alt1]], implicit $mxcsr
+ ; CHECK-NEXT: [[VMULSSrr2:%[0-9]+]]:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VMULSSrr killed [[VMULSSrr1]], killed [[VFMADD213SSr]], implicit $mxcsr
+ ; CHECK-NEXT: [[VMULSSrr3:%[0-9]+]]:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VMULSSrr [[COPY]], [[VMULSSrr2]], implicit $mxcsr
+ ; CHECK-NEXT: [[VFMADD213SSr1:%[0-9]+]]:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VFMADD213SSr [[VMULSSrr2]], killed [[VMULSSrr3]], [[VMOVSSrm_alt]], implicit $mxcsr
+ ; CHECK-NEXT: [[VMULSSrr4:%[0-9]+]]:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VMULSSrr [[VMULSSrr2]], [[VMOVSSrm_alt1]], implicit $mxcsr
+ ; CHECK-NEXT: [[VMULSSrr5:%[0-9]+]]:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VMULSSrr killed [[VMULSSrr4]], killed [[VFMADD213SSr1]], implicit $mxcsr
+ ; CHECK-NEXT: $xmm0 = COPY [[VMULSSrr5]]
+ ; CHECK-NEXT: RET 0, $xmm0
%sqrt = tail call float @llvm.sqrt.f32(float %f)
%div = fdiv fast float 1.0, %sqrt
ret float %div
@@ -117,22 +122,23 @@ define float @rsqrt_ieee(float %f) #0 {
define float @rsqrt_daz(float %f) #1 {
; CHECK-LABEL: name: rsqrt_daz
; CHECK: bb.0 (%ir-block.0):
- ; CHECK: liveins: $xmm0
- ; CHECK: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
- ; CHECK: [[DEF:%[0-9]+]]:fr32 = IMPLICIT_DEF
- ; CHECK: [[VRSQRTSSr:%[0-9]+]]:fr32 = nnan ninf nsz arcp contract afn reassoc VRSQRTSSr killed [[DEF]], [[COPY]]
- ; CHECK: %3:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VMULSSrr [[COPY]], [[VRSQRTSSr]], implicit $mxcsr
- ; CHECK: [[VMOVSSrm_alt:%[0-9]+]]:fr32 = VMOVSSrm_alt $rip, 1, $noreg, %const.0, $noreg :: (load (s32) from constant-pool)
- ; CHECK: %5:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VFMADD213SSr [[VRSQRTSSr]], killed %3, [[VMOVSSrm_alt]], implicit $mxcsr
- ; CHECK: [[VMOVSSrm_alt1:%[0-9]+]]:fr32 = VMOVSSrm_alt $rip, 1, $noreg, %const.1, $noreg :: (load (s32) from constant-pool)
- ; CHECK: %7:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VMULSSrr [[VRSQRTSSr]], [[VMOVSSrm_alt1]], implicit $mxcsr
- ; CHECK: %8:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VMULSSrr killed %7, killed %5, implicit $mxcsr
- ; CHECK: %9:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VMULSSrr [[COPY]], %8, implicit $mxcsr
- ; CHECK: %10:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VFMADD213SSr %8, killed %9, [[VMOVSSrm_alt]], implicit $mxcsr
- ; CHECK: %11:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VMULSSrr %8, [[VMOVSSrm_alt1]], implicit $mxcsr
- ; CHECK: %12:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VMULSSrr killed %11, killed %10, implicit $mxcsr
- ; CHECK: $xmm0 = COPY %12
- ; CHECK: RET 0, $xmm0
+ ; CHECK-NEXT: liveins: $xmm0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:fr32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[VRSQRTSSr:%[0-9]+]]:fr32 = nnan ninf nsz arcp contract afn reassoc VRSQRTSSr killed [[DEF]], [[COPY]]
+ ; CHECK-NEXT: [[VMULSSrr:%[0-9]+]]:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VMULSSrr [[COPY]], [[VRSQRTSSr]], implicit $mxcsr
+ ; CHECK-NEXT: [[VMOVSSrm_alt:%[0-9]+]]:fr32 = VMOVSSrm_alt $rip, 1, $noreg, %const.0, $noreg :: (load (s32) from constant-pool)
+ ; CHECK-NEXT: [[VFMADD213SSr:%[0-9]+]]:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VFMADD213SSr [[VRSQRTSSr]], killed [[VMULSSrr]], [[VMOVSSrm_alt]], implicit $mxcsr
+ ; CHECK-NEXT: [[VMOVSSrm_alt1:%[0-9]+]]:fr32 = VMOVSSrm_alt $rip, 1, $noreg, %const.1, $noreg :: (load (s32) from constant-pool)
+ ; CHECK-NEXT: [[VMULSSrr1:%[0-9]+]]:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VMULSSrr [[VRSQRTSSr]], [[VMOVSSrm_alt1]], implicit $mxcsr
+ ; CHECK-NEXT: [[VMULSSrr2:%[0-9]+]]:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VMULSSrr killed [[VMULSSrr1]], killed [[VFMADD213SSr]], implicit $mxcsr
+ ; CHECK-NEXT: [[VMULSSrr3:%[0-9]+]]:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VMULSSrr [[COPY]], [[VMULSSrr2]], implicit $mxcsr
+ ; CHECK-NEXT: [[VFMADD213SSr1:%[0-9]+]]:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VFMADD213SSr [[VMULSSrr2]], killed [[VMULSSrr3]], [[VMOVSSrm_alt]], implicit $mxcsr
+ ; CHECK-NEXT: [[VMULSSrr4:%[0-9]+]]:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VMULSSrr [[VMULSSrr2]], [[VMOVSSrm_alt1]], implicit $mxcsr
+ ; CHECK-NEXT: [[VMULSSrr5:%[0-9]+]]:fr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept VMULSSrr killed [[VMULSSrr4]], killed [[VFMADD213SSr1]], implicit $mxcsr
+ ; CHECK-NEXT: $xmm0 = COPY [[VMULSSrr5]]
+ ; CHECK-NEXT: RET 0, $xmm0
%sqrt = tail call float @llvm.sqrt.f32(float %f)
%div = fdiv fast float 1.0, %sqrt
ret float %div