[llvm] 8985755 - [InstSimplify] add limit folds for fmin/fmax

Sanjay Patel via llvm-commits <llvm-commits at lists.llvm.org>
Tue Sep 15 08:03:29 PDT 2020


Author: Sanjay Patel
Date: 2020-09-15T10:58:44-04:00
New Revision: 8985755762a429573af2ce657274772339d3b9db

URL: https://github.com/llvm/llvm-project/commit/8985755762a429573af2ce657274772339d3b9db
DIFF: https://github.com/llvm/llvm-project/commit/8985755762a429573af2ce657274772339d3b9db.diff

LOG: [InstSimplify] add limit folds for fmin/fmax

If the constant operand is the limit value at the opposite end from the
min/max operation (for example, +inf for fmin), then the result must be
the other operand.

This is based on the similar codegen transform proposed in:
D87571
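
For illustration, a minimal IR sketch (mirroring the cases covered by the
updated tests below; the function name is just for the example) that the
new fold reduces:

  ; With nnan, the +inf operand can never be the smaller value, so
  ; instsimplify can fold the call to its other operand, e.g. via:
  ;   opt -passes=instsimplify example.ll -S
  define float @example_minnum_inf(float %x) {
    %r = call nnan float @llvm.minnum.f32(float %x, float 0x7FF0000000000000)
    ret float %r   ; simplifies to: ret float %x
  }
  declare float @llvm.minnum.f32(float, float)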

Added: 
    

Modified: 
    llvm/lib/Analysis/InstructionSimplify.cpp
    llvm/test/Transforms/InstSimplify/fminmax-folds.ll
    llvm/test/Transforms/PhaseOrdering/X86/vector-reductions-expanded.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 716af06769f9..9e38a4d8595a 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -5477,10 +5477,12 @@ static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
       if (C->isNegative() == IsMin && (!PropagateNaN || Q.CxtI->hasNoNaNs()))
         return ConstantFP::get(ReturnType, *C);
 
-      // TODO: minimum(nnan x, inf) -> x
-      // TODO: minnum(nnan ninf x, flt_max) -> x
-      // TODO: maximum(nnan x, -inf) -> x
-      // TODO: maxnum(nnan ninf x, -flt_max) -> x
+      // minnum(X, +inf) -> X if nnan
+      // maxnum(X, -inf) -> X if nnan
+      // minimum(X, +inf) -> X
+      // maximum(X, -inf) -> X
+      if (C->isNegative() != IsMin && (PropagateNaN || Q.CxtI->hasNoNaNs()))
+        return Op0;
     }
 
     // Min/max of the same operation with common operand:

diff --git a/llvm/test/Transforms/InstSimplify/fminmax-folds.ll b/llvm/test/Transforms/InstSimplify/fminmax-folds.ll
index f05837a8c2f6..c62f76c87fae 100644
--- a/llvm/test/Transforms/InstSimplify/fminmax-folds.ll
+++ b/llvm/test/Transforms/InstSimplify/fminmax-folds.ll
@@ -79,8 +79,7 @@ define float @test_maximum_const_inf(float %x) {
 
 define float @test_minimum_const_inf(float %x) {
 ; CHECK-LABEL: @test_minimum_const_inf(
-; CHECK-NEXT:    [[R:%.*]] = call float @llvm.minimum.f32(float [[X:%.*]], float 0x7FF0000000000000)
-; CHECK-NEXT:    ret float [[R]]
+; CHECK-NEXT:    ret float [[X:%.*]]
 ;
   %r = call float @llvm.minimum.f32(float %x, float 0x7ff0000000000000)
   ret float %r
@@ -105,8 +104,7 @@ define float @test_maxnum_const_neg_inf(float %x) {
 
 define float @test_maximum_const_neg_inf(float %x) {
 ; CHECK-LABEL: @test_maximum_const_neg_inf(
-; CHECK-NEXT:    [[R:%.*]] = call float @llvm.maximum.f32(float [[X:%.*]], float 0xFFF0000000000000)
-; CHECK-NEXT:    ret float [[R]]
+; CHECK-NEXT:    ret float [[X:%.*]]
 ;
   %r = call float @llvm.maximum.f32(float %x, float 0xfff0000000000000)
   ret float %r
@@ -123,8 +121,7 @@ define float @test_minimum_const_neg_inf(float %x) {
 
 define float @test_minnum_const_inf_nnan(float %x) {
 ; CHECK-LABEL: @test_minnum_const_inf_nnan(
-; CHECK-NEXT:    [[R:%.*]] = call nnan float @llvm.minnum.f32(float [[X:%.*]], float 0x7FF0000000000000)
-; CHECK-NEXT:    ret float [[R]]
+; CHECK-NEXT:    ret float [[X:%.*]]
 ;
   %r = call nnan float @llvm.minnum.f32(float %x, float 0x7ff0000000000000)
   ret float %r
@@ -148,8 +145,7 @@ define float @test_maximum_const_inf_nnan(float %x) {
 
 define float @test_minimum_const_inf_nnan(float %x) {
 ; CHECK-LABEL: @test_minimum_const_inf_nnan(
-; CHECK-NEXT:    [[R:%.*]] = call nnan float @llvm.minimum.f32(float [[X:%.*]], float 0x7FF0000000000000)
-; CHECK-NEXT:    ret float [[R]]
+; CHECK-NEXT:    ret float [[X:%.*]]
 ;
   %r = call nnan float @llvm.minimum.f32(float %x, float 0x7ff0000000000000)
   ret float %r
@@ -157,8 +153,7 @@ define float @test_minimum_const_inf_nnan(float %x) {
 
 define float @test_minnum_const_inf_nnan_comm(float %x) {
 ; CHECK-LABEL: @test_minnum_const_inf_nnan_comm(
-; CHECK-NEXT:    [[R:%.*]] = call nnan float @llvm.minnum.f32(float 0x7FF0000000000000, float [[X:%.*]])
-; CHECK-NEXT:    ret float [[R]]
+; CHECK-NEXT:    ret float [[X:%.*]]
 ;
   %r = call nnan float @llvm.minnum.f32(float 0x7ff0000000000000, float %x)
   ret float %r
@@ -182,8 +177,7 @@ define float @test_maximum_const_inf_nnan_comm(float %x) {
 
 define float @test_minimum_const_inf_nnan_comm(float %x) {
 ; CHECK-LABEL: @test_minimum_const_inf_nnan_comm(
-; CHECK-NEXT:    [[R:%.*]] = call nnan float @llvm.minimum.f32(float 0x7FF0000000000000, float [[X:%.*]])
-; CHECK-NEXT:    ret float [[R]]
+; CHECK-NEXT:    ret float [[X:%.*]]
 ;
   %r = call nnan float @llvm.minimum.f32(float 0x7ff0000000000000, float %x)
   ret float %r
@@ -191,8 +185,7 @@ define float @test_minimum_const_inf_nnan_comm(float %x) {
 
 define <2 x float> @test_minnum_const_inf_nnan_comm_vec(<2 x float> %x) {
 ; CHECK-LABEL: @test_minnum_const_inf_nnan_comm_vec(
-; CHECK-NEXT:    [[R:%.*]] = call nnan <2 x float> @llvm.minnum.v2f32(<2 x float> <float 0x7FF0000000000000, float 0x7FF0000000000000>, <2 x float> [[X:%.*]])
-; CHECK-NEXT:    ret <2 x float> [[R]]
+; CHECK-NEXT:    ret <2 x float> [[X:%.*]]
 ;
   %r = call nnan <2 x float> @llvm.minnum.v2f32(<2 x float> <float 0x7ff0000000000000, float 0x7ff0000000000000>, <2 x float> %x)
   ret <2 x float> %r
@@ -216,8 +209,7 @@ define <2 x float> @test_maximum_const_inf_nnan_comm_vec(<2 x float> %x) {
 
 define <2 x float> @test_minimum_const_inf_nnan_comm_vec(<2 x float> %x) {
 ; CHECK-LABEL: @test_minimum_const_inf_nnan_comm_vec(
-; CHECK-NEXT:    [[R:%.*]] = call nnan <2 x float> @llvm.minimum.v2f32(<2 x float> <float 0x7FF0000000000000, float 0x7FF0000000000000>, <2 x float> [[X:%.*]])
-; CHECK-NEXT:    ret <2 x float> [[R]]
+; CHECK-NEXT:    ret <2 x float> [[X:%.*]]
 ;
   %r = call nnan <2 x float> @llvm.minimum.v2f32(<2 x float> <float 0x7ff0000000000000, float 0x7ff0000000000000>, <2 x float> %x)
   ret <2 x float> %r
@@ -233,8 +225,7 @@ define float @test_minnum_const_neg_inf_nnan(float %x) {
 
 define float @test_maxnum_const_neg_inf_nnan(float %x) {
 ; CHECK-LABEL: @test_maxnum_const_neg_inf_nnan(
-; CHECK-NEXT:    [[R:%.*]] = call nnan float @llvm.maxnum.f32(float [[X:%.*]], float 0xFFF0000000000000)
-; CHECK-NEXT:    ret float [[R]]
+; CHECK-NEXT:    ret float [[X:%.*]]
 ;
   %r = call nnan float @llvm.maxnum.f32(float %x, float 0xfff0000000000000)
   ret float %r
@@ -242,8 +233,7 @@ define float @test_maxnum_const_neg_inf_nnan(float %x) {
 
 define float @test_maximum_const_neg_inf_nnan(float %x) {
 ; CHECK-LABEL: @test_maximum_const_neg_inf_nnan(
-; CHECK-NEXT:    [[R:%.*]] = call nnan float @llvm.maximum.f32(float [[X:%.*]], float 0xFFF0000000000000)
-; CHECK-NEXT:    ret float [[R]]
+; CHECK-NEXT:    ret float [[X:%.*]]
 ;
   %r = call nnan float @llvm.maximum.f32(float %x, float 0xfff0000000000000)
   ret float %r
@@ -357,8 +347,7 @@ define float @test_maximum_const_max_ninf(float %x) {
 
 define float @test_minimum_const_max_ninf(float %x) {
 ; CHECK-LABEL: @test_minimum_const_max_ninf(
-; CHECK-NEXT:    [[R:%.*]] = call ninf float @llvm.minimum.f32(float [[X:%.*]], float 0x47EFFFFFE0000000)
-; CHECK-NEXT:    ret float [[R]]
+; CHECK-NEXT:    ret float [[X:%.*]]
 ;
   %r = call ninf float @llvm.minimum.f32(float %x, float 0x47efffffe0000000)
   ret float %r
@@ -383,8 +372,7 @@ define float @test_maxnum_const_neg_max_ninf(float %x) {
 
 define float @test_maximum_const_neg_max_ninf(float %x) {
 ; CHECK-LABEL: @test_maximum_const_neg_max_ninf(
-; CHECK-NEXT:    [[R:%.*]] = call ninf float @llvm.maximum.f32(float [[X:%.*]], float 0xC7EFFFFFE0000000)
-; CHECK-NEXT:    ret float [[R]]
+; CHECK-NEXT:    ret float [[X:%.*]]
 ;
   %r = call ninf float @llvm.maximum.f32(float %x, float 0xc7efffffe0000000)
   ret float %r
@@ -401,8 +389,7 @@ define float @test_minimum_const_neg_max_ninf(float %x) {
 
 define float @test_minnum_const_max_nnan_ninf(float %x) {
 ; CHECK-LABEL: @test_minnum_const_max_nnan_ninf(
-; CHECK-NEXT:    [[R:%.*]] = call nnan ninf float @llvm.minnum.f32(float [[X:%.*]], float 0x47EFFFFFE0000000)
-; CHECK-NEXT:    ret float [[R]]
+; CHECK-NEXT:    ret float [[X:%.*]]
 ;
   %r = call nnan ninf float @llvm.minnum.f32(float %x, float 0x47efffffe0000000)
   ret float %r
@@ -426,8 +413,7 @@ define float @test_maximum_const_max_nnan_ninf(float %x) {
 
 define float @test_minimum_const_max_nnan_ninf(float %x) {
 ; CHECK-LABEL: @test_minimum_const_max_nnan_ninf(
-; CHECK-NEXT:    [[R:%.*]] = call nnan ninf float @llvm.minimum.f32(float [[X:%.*]], float 0x47EFFFFFE0000000)
-; CHECK-NEXT:    ret float [[R]]
+; CHECK-NEXT:    ret float [[X:%.*]]
 ;
   %r = call nnan ninf float @llvm.minimum.f32(float %x, float 0x47efffffe0000000)
   ret float %r
@@ -443,8 +429,7 @@ define float @test_minnum_const_neg_max_nnan_ninf(float %x) {
 
 define float @test_maxnum_const_neg_max_nnan_ninf(float %x) {
 ; CHECK-LABEL: @test_maxnum_const_neg_max_nnan_ninf(
-; CHECK-NEXT:    [[R:%.*]] = call nnan ninf float @llvm.maxnum.f32(float [[X:%.*]], float 0xC7EFFFFFE0000000)
-; CHECK-NEXT:    ret float [[R]]
+; CHECK-NEXT:    ret float [[X:%.*]]
 ;
   %r = call nnan ninf float @llvm.maxnum.f32(float %x, float 0xc7efffffe0000000)
   ret float %r
@@ -452,8 +437,7 @@ define float @test_maxnum_const_neg_max_nnan_ninf(float %x) {
 
 define float @test_maximum_const_neg_max_nnan_ninf(float %x) {
 ; CHECK-LABEL: @test_maximum_const_neg_max_nnan_ninf(
-; CHECK-NEXT:    [[R:%.*]] = call nnan ninf float @llvm.maximum.f32(float [[X:%.*]], float 0xC7EFFFFFE0000000)
-; CHECK-NEXT:    ret float [[R]]
+; CHECK-NEXT:    ret float [[X:%.*]]
 ;
   %r = call nnan ninf float @llvm.maximum.f32(float %x, float 0xc7efffffe0000000)
   ret float %r
@@ -1076,8 +1060,7 @@ define <2 x double> @minimum_neginf_commute_vec(<2 x double> %x) {
 
 define float @minimum_inf(float %x) {
 ; CHECK-LABEL: @minimum_inf(
-; CHECK-NEXT:    [[VAL:%.*]] = call float @llvm.minimum.f32(float 0x7FF0000000000000, float [[X:%.*]])
-; CHECK-NEXT:    ret float [[VAL]]
+; CHECK-NEXT:    ret float [[X:%.*]]
 ;
   %val = call float @llvm.minimum.f32(float 0x7FF0000000000000, float %x)
   ret float %val

diff --git a/llvm/test/Transforms/PhaseOrdering/X86/vector-reductions-expanded.ll b/llvm/test/Transforms/PhaseOrdering/X86/vector-reductions-expanded.ll
index 0e02a01291d8..c3699ff0d6b4 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/vector-reductions-expanded.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/vector-reductions-expanded.ll
@@ -12,7 +12,7 @@ define i32 @add_v4i32(i32* %p) #0 {
 ; CHECK-LABEL: @add_v4i32(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[P:%.*]] to <4 x i32>*
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4, !tbaa !0
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4, [[TBAA0:!tbaa !.*]]
 ; CHECK-NEXT:    [[RDX_SHUF:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
 ; CHECK-NEXT:    [[BIN_RDX:%.*]] = add <4 x i32> [[TMP1]], [[RDX_SHUF]]
 ; CHECK-NEXT:    [[RDX_SHUF3:%.*]] = shufflevector <4 x i32> [[BIN_RDX]], <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
@@ -51,7 +51,7 @@ define signext i16 @mul_v8i16(i16* %p) #0 {
 ; CHECK-LABEL: @mul_v8i16(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i16* [[P:%.*]] to <8 x i16>*
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 2, !tbaa !4
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 2, [[TBAA4:!tbaa !.*]]
 ; CHECK-NEXT:    [[RDX_SHUF:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
 ; CHECK-NEXT:    [[BIN_RDX:%.*]] = mul <8 x i16> [[TMP1]], [[RDX_SHUF]]
 ; CHECK-NEXT:    [[RDX_SHUF3:%.*]] = shufflevector <8 x i16> [[BIN_RDX]], <8 x i16> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -95,7 +95,7 @@ define signext i8 @or_v16i8(i8* %p) #0 {
 ; CHECK-LABEL: @or_v16i8(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[P:%.*]] to <16 x i8>*
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* [[TMP0]], align 1, !tbaa !6
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* [[TMP0]], align 1, [[TBAA6:!tbaa !.*]]
 ; CHECK-NEXT:    [[RDX_SHUF:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
 ; CHECK-NEXT:    [[BIN_RDX:%.*]] = or <16 x i8> [[TMP1]], [[RDX_SHUF]]
 ; CHECK-NEXT:    [[RDX_SHUF4:%.*]] = shufflevector <16 x i8> [[BIN_RDX]], <16 x i8> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -141,7 +141,7 @@ define i32 @smin_v4i32(i32* %p) #0 {
 ; CHECK-LABEL: @smin_v4i32(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[P:%.*]] to <4 x i32>*
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4, !tbaa !0
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4, [[TBAA0]]
 ; CHECK-NEXT:    [[RDX_SHUF:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
 ; CHECK-NEXT:    [[RDX_MINMAX_CMP:%.*]] = icmp slt <4 x i32> [[TMP1]], [[RDX_SHUF]]
 ; CHECK-NEXT:    [[RDX_MINMAX_SELECT:%.*]] = select <4 x i1> [[RDX_MINMAX_CMP]], <4 x i32> [[TMP1]], <4 x i32> [[RDX_SHUF]]
@@ -195,7 +195,7 @@ define i32 @umax_v4i32(i32* %p) #0 {
 ; CHECK-LABEL: @umax_v4i32(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[P:%.*]] to <4 x i32>*
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4, !tbaa !0
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4, [[TBAA0]]
 ; CHECK-NEXT:    [[RDX_SHUF:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
 ; CHECK-NEXT:    [[RDX_MINMAX_CMP:%.*]] = icmp ugt <4 x i32> [[TMP1]], [[RDX_SHUF]]
 ; CHECK-NEXT:    [[RDX_MINMAX_SELECT:%.*]] = select <4 x i1> [[RDX_MINMAX_CMP]], <4 x i32> [[TMP1]], <4 x i32> [[RDX_SHUF]]
@@ -249,7 +249,7 @@ define float @fadd_v4i32(float* %p) #0 {
 ; CHECK-LABEL: @fadd_v4i32(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = bitcast float* [[P:%.*]] to <4 x float>*
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4, !tbaa !7
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4, [[TBAA7:!tbaa !.*]]
 ; CHECK-NEXT:    [[RDX_SHUF:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
 ; CHECK-NEXT:    [[BIN_RDX:%.*]] = fadd fast <4 x float> [[TMP1]], [[RDX_SHUF]]
 ; CHECK-NEXT:    [[RDX_SHUF3:%.*]] = shufflevector <4 x float> [[BIN_RDX]], <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
@@ -290,7 +290,7 @@ define float @fmul_v4i32(float* %p) #0 {
 ; CHECK-LABEL: @fmul_v4i32(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = bitcast float* [[P:%.*]] to <4 x float>*
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4, !tbaa !7
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4, [[TBAA7]]
 ; CHECK-NEXT:    [[RDX_SHUF:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
 ; CHECK-NEXT:    [[BIN_RDX:%.*]] = fmul fast <4 x float> [[TMP1]], [[RDX_SHUF]]
 ; CHECK-NEXT:    [[RDX_SHUF3:%.*]] = shufflevector <4 x float> [[BIN_RDX]], <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
@@ -330,18 +330,17 @@ for.end:
 define float @fmin_v4i32(float* %p) #0 {
 ; CHECK-LABEL: @fmin_v4i32(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[P:%.*]], align 4, !tbaa !7
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call fast float @llvm.minnum.f32(float [[TMP0]], float 0x47EFFFFFE0000000)
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[P:%.*]], align 4, [[TBAA7]]
 ; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, float* [[P]], i64 1
-; CHECK-NEXT:    [[TMP2:%.*]] = load float, float* [[ARRAYIDX_1]], align 4, !tbaa !7
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call fast float @llvm.minnum.f32(float [[TMP2]], float [[TMP1]])
+; CHECK-NEXT:    [[TMP1:%.*]] = load float, float* [[ARRAYIDX_1]], align 4, [[TBAA7]]
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call fast float @llvm.minnum.f32(float [[TMP1]], float [[TMP0]])
 ; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, float* [[P]], i64 2
-; CHECK-NEXT:    [[TMP4:%.*]] = load float, float* [[ARRAYIDX_2]], align 4, !tbaa !7
-; CHECK-NEXT:    [[TMP5:%.*]] = tail call fast float @llvm.minnum.f32(float [[TMP4]], float [[TMP3]])
+; CHECK-NEXT:    [[TMP3:%.*]] = load float, float* [[ARRAYIDX_2]], align 4, [[TBAA7]]
+; CHECK-NEXT:    [[TMP4:%.*]] = tail call fast float @llvm.minnum.f32(float [[TMP3]], float [[TMP2]])
 ; CHECK-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, float* [[P]], i64 3
-; CHECK-NEXT:    [[TMP6:%.*]] = load float, float* [[ARRAYIDX_3]], align 4, !tbaa !7
-; CHECK-NEXT:    [[TMP7:%.*]] = tail call fast float @llvm.minnum.f32(float [[TMP6]], float [[TMP5]])
-; CHECK-NEXT:    ret float [[TMP7]]
+; CHECK-NEXT:    [[TMP5:%.*]] = load float, float* [[ARRAYIDX_3]], align 4, [[TBAA7]]
+; CHECK-NEXT:    [[TMP6:%.*]] = tail call fast float @llvm.minnum.f32(float [[TMP5]], float [[TMP4]])
+; CHECK-NEXT:    ret float [[TMP6]]
 ;
 entry:
   br label %for.cond


        

