[llvm] [InstCombine] Transform high latency, dependent FSQRT/FDIV into FMUL (PR #87474)

Matt Arsenault via llvm-commits llvm-commits@lists.llvm.org
Tue Aug 20 10:50:50 PDT 2024


================
@@ -0,0 +1,406 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S -passes='instcombine<no-verify-fixpoint>' < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+@x = dso_local local_unnamed_addr global double 0.000000e+00, align 8
+@r1 = dso_local local_unnamed_addr global double 0.000000e+00, align 8
+@r2 = dso_local local_unnamed_addr global double 0.000000e+00, align 8
+@r3 = dso_local local_unnamed_addr global double 0.000000e+00, align 8
+
+; x/r1/r2 all in the same block.
+define void @bb_constraint_case1(double %a) #0 {
+; CHECK-LABEL: define void @bb_constraint_case1(
+; CHECK-SAME: double [[A:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = tail call fast double @llvm.sqrt.f64(double [[A]])
+; CHECK-NEXT:    [[TMP1:%.*]] = fdiv fast double 1.000000e+00, [[A]]
+; CHECK-NEXT:    [[DIV:%.*]] = fmul fast double [[TMP1]], [[TMP0]]
+; CHECK-NEXT:    store double [[DIV]], ptr @x, align 8
+; CHECK-NEXT:    store double [[TMP1]], ptr @r1, align 8
+; CHECK-NEXT:    store double [[TMP0]], ptr @r2, align 8
+; CHECK-NEXT:    ret void
+;
+entry:
+  %0 = tail call fast double @llvm.sqrt.f64(double %a)
+  %div = fdiv fast double 1.000000e+00, %0
+  store double %div, ptr @x, align 8
+  %mul = fmul fast double %div, %div
+  store double %mul, ptr @r1, align 8
+  %mul1 = fmul fast double %a, %div
+  store double %mul1, ptr @r2, align 8
+  ret void
+}
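+
+; For reference, the identity behind the rewrite checked above:
+; 1/sqrt(a) = (1/a) * sqrt(a). After the rewrite the fdiv depends only
+; on a (not on the sqrt), so it is no longer serialized behind it, and
+; the users fold away: mul = div * div = 1/a and mul1 = a * div = sqrt(a).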
+; x/r1 in one block and r2 in another block, behind a conditional guard.
+define void @bb_constraint_case2(double %a, i32 %d) #0 {
+; CHECK-LABEL: define void @bb_constraint_case2(
+; CHECK-SAME: double [[A:%.*]], i32 [[D:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call fast double @llvm.sqrt.f64(double [[A]])
+; CHECK-NEXT:    [[TMP1:%.*]] = fdiv fast double 1.000000e+00, [[A]]
+; CHECK-NEXT:    [[DIV:%.*]] = fmul fast double [[TMP1]], [[TMP0]]
+; CHECK-NEXT:    store double [[DIV]], ptr @x, align 8
+; CHECK-NEXT:    store double [[TMP1]], ptr @r1, align 8
+; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[D]], 0
+; CHECK-NEXT:    br i1 [[TOBOOL_NOT]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
+; CHECK:       if.then:
+; CHECK-NEXT:    store double [[TMP0]], ptr @r2, align 8
+; CHECK-NEXT:    br label [[IF_END]]
+; CHECK:       if.end:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %0 = call fast double @llvm.sqrt.f64(double %a)
+  %div = fdiv fast double 1.000000e+00, %0
+  store double %div, ptr @x, align 8
+  %mul = fmul fast double %div, %div
+  store double %mul, ptr @r1, align 8
+  %tobool.not = icmp eq i32 %d, 0
+  br i1 %tobool.not, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  %mul1 = fmul fast double %div, %a
+  store double %mul1, ptr @r2, align 8
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; x in one block; r1/r2 in another block, conditionally guarded. Don't optimize.
+define void @bb_constraint_case3(double %a, i32 %d) #0 {
+; CHECK-LABEL: define void @bb_constraint_case3(
+; CHECK-SAME: double [[A:%.*]], i32 [[D:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call fast double @llvm.sqrt.f64(double [[A]])
+; CHECK-NEXT:    [[DIV:%.*]] = fdiv fast double 1.000000e+00, [[TMP0]]
+; CHECK-NEXT:    store double [[DIV]], ptr @x, align 8
+; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[D]], 0
+; CHECK-NEXT:    br i1 [[TOBOOL_NOT]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
+; CHECK:       if.then:
+; CHECK-NEXT:    [[MUL:%.*]] = fmul fast double [[DIV]], [[DIV]]
+; CHECK-NEXT:    store double [[MUL]], ptr @r1, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load double, ptr @x, align 8
+; CHECK-NEXT:    [[MUL1:%.*]] = fmul fast double [[TMP1]], [[A]]
+; CHECK-NEXT:    store double [[MUL1]], ptr @r2, align 8
+; CHECK-NEXT:    br label [[IF_END]]
+; CHECK:       if.end:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %0 = call fast double @llvm.sqrt.f64(double %a)
+  %div = fdiv fast double 1.000000e+00, %0
+  store double %div, ptr @x, align 8
+  %tobool = icmp ne i32 %d, 0
+  br i1 %tobool, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  %mul = fmul fast double %div, %div
+  store double %mul, ptr @r1, align 8
+  %1 = load double, ptr @x, align 8
+  %mul1 = fmul fast double %a, %1
+  store double %mul1, ptr @r2, align 8
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+}
+
+; x in one block; r1/r2 each in a different block, conditionally guarded. Don't optimize.
+define void @bb_constraint_case4(double %a, i32 %c, i32 %d) #0 {
+; CHECK-LABEL: define void @bb_constraint_case4(
+; CHECK-SAME: double [[A:%.*]], i32 [[C:%.*]], i32 [[D:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call fast double @llvm.sqrt.f64(double [[A]])
+; CHECK-NEXT:    [[DIV:%.*]] = fdiv fast double 1.000000e+00, [[TMP0]]
+; CHECK-NEXT:    store double [[DIV]], ptr @x, align 8
+; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[C]], 0
+; CHECK-NEXT:    br i1 [[TOBOOL_NOT]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
+; CHECK:       if.then:
+; CHECK-NEXT:    [[MUL:%.*]] = fmul fast double [[DIV]], [[DIV]]
+; CHECK-NEXT:    store double [[MUL]], ptr @r1, align 8
+; CHECK-NEXT:    br label [[IF_END]]
+; CHECK:       if.end:
+; CHECK-NEXT:    [[TOBOOL1_NOT:%.*]] = icmp eq i32 [[D]], 0
+; CHECK-NEXT:    br i1 [[TOBOOL1_NOT]], label [[IF_END4:%.*]], label [[IF_THEN2:%.*]]
+; CHECK:       if.then2:
+; CHECK-NEXT:    [[TMP1:%.*]] = load double, ptr @x, align 8
+; CHECK-NEXT:    [[MUL3:%.*]] = fmul fast double [[TMP1]], [[A]]
+; CHECK-NEXT:    store double [[MUL3]], ptr @r2, align 8
+; CHECK-NEXT:    br label [[IF_END4]]
+; CHECK:       if.end4:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %0 = call fast double @llvm.sqrt.f64(double %a)
+  %div = fdiv fast double 1.000000e+00, %0
+  store double %div, ptr @x, align 8
+  %tobool = icmp ne i32 %c, 0
+  br i1 %tobool, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  %mul = fmul fast double %div, %div
+  store double %mul, ptr @r1, align 8
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  %tobool1 = icmp ne i32 %d, 0
+  br i1 %tobool1, label %if.then2, label %if.end4
+
+if.then2:                                         ; preds = %if.end
+  %1 = load double, ptr @x, align 8
+  %mul3 = fmul fast double %a, %1
+  store double %mul3, ptr @r2, align 8
+  br label %if.end4
+
+if.end4:                                          ; preds = %if.then2, %if.end
+  ret void
+}
+
+; sqrt value comes from different blocks. Don't optimize.
+define void @bb_constraint_case5(double %a, i32 %c) #0 {
+; CHECK-LABEL: define void @bb_constraint_case5(
+; CHECK-SAME: double [[A:%.*]], i32 [[C:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[C]], 0
+; CHECK-NEXT:    br i1 [[TOBOOL_NOT]], label [[IF_ELSE:%.*]], label [[IF_THEN:%.*]]
+; CHECK:       if.then:
+; CHECK-NEXT:    [[TMP0:%.*]] = call fast double @llvm.sqrt.f64(double [[A]])
+; CHECK-NEXT:    br label [[IF_END:%.*]]
+; CHECK:       if.else:
+; CHECK-NEXT:    [[ADD:%.*]] = fadd fast double [[A]], 1.000000e+01
+; CHECK-NEXT:    [[TMP1:%.*]] = call fast double @llvm.sqrt.f64(double [[ADD]])
+; CHECK-NEXT:    br label [[IF_END]]
+; CHECK:       if.end:
+; CHECK-NEXT:    [[DOTPN:%.*]] = phi double [ [[TMP0]], [[IF_THEN]] ], [ [[TMP1]], [[IF_ELSE]] ]
+; CHECK-NEXT:    [[DIV:%.*]] = fdiv fast double 1.000000e+00, [[DOTPN]]
+; CHECK-NEXT:    [[MUL:%.*]] = fmul fast double [[DIV]], [[DIV]]
+; CHECK-NEXT:    store double [[MUL]], ptr @r1, align 8
+; CHECK-NEXT:    [[MUL2:%.*]] = fmul fast double [[DIV]], [[A]]
+; CHECK-NEXT:    store double [[MUL2]], ptr @r2, align 8
+; CHECK-NEXT:    ret void
+;
+entry:
+  %tobool = icmp ne i32 %c, 0
+  br i1 %tobool, label %if.then, label %if.else
+
+if.then:                                          ; preds = %entry
+  %0 = call fast double @llvm.sqrt.f64(double %a)
+  br label %if.end
+
+if.else:                                          ; preds = %entry
+  %add = fadd fast double %a, 1.000000e+01
+  %1 = call fast double @llvm.sqrt.f64(double %add)
+  br label %if.end
+
+if.end:                                           ; preds = %if.else, %if.then
+  %phi = phi double [ %0, %if.then ], [ %1, %if.else ]
+  %div = fdiv fast double 1.000000e+00, %phi
+  %mul = fmul fast double %div, %div
+  store double %mul, ptr @r1, align 8
+  %mul2 = fmul fast double %a, %div
+  store double %mul2, ptr @r2, align 8
+  ret void
+}
+
+; x in one block, conditionally guarded; r1/r2 in another block. Don't optimize.
+define void @bb_constraint_case6(double %a, i32 %d) #0 {
+; CHECK-LABEL: define void @bb_constraint_case6(
+; CHECK-SAME: double [[A:%.*]], i32 [[D:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[D]], 0
+; CHECK-NEXT:    br i1 [[TOBOOL_NOT]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
+; CHECK:       entry.if.end_crit_edge:
+; CHECK-NEXT:    [[DOTPRE:%.*]] = load double, ptr @x, align 8
+; CHECK-NEXT:    br label [[IF_END1:%.*]]
+; CHECK:       if.then:
+; CHECK-NEXT:    [[TMP0:%.*]] = tail call fast double @llvm.sqrt.f64(double [[A]])
+; CHECK-NEXT:    [[DIV:%.*]] = fdiv fast double 1.000000e+00, [[TMP0]]
+; CHECK-NEXT:    store double [[DIV]], ptr @x, align 8
+; CHECK-NEXT:    br label [[IF_END1]]
+; CHECK:       if.end:
+; CHECK-NEXT:    [[TMP1:%.*]] = phi double [ [[DOTPRE]], [[IF_END]] ], [ [[DIV]], [[IF_THEN]] ]
+; CHECK-NEXT:    [[MUL:%.*]] = fmul fast double [[TMP1]], [[TMP1]]
+; CHECK-NEXT:    store double [[MUL]], ptr @r1, align 8
+; CHECK-NEXT:    [[MUL1:%.*]] = fmul fast double [[TMP1]], [[A]]
+; CHECK-NEXT:    store double [[MUL1]], ptr @r2, align 8
+; CHECK-NEXT:    ret void
+;
+entry:
+  %tobool.not = icmp eq i32 %d, 0
+  br i1 %tobool.not, label %entry.if.end_crit_edge, label %if.then
+
+entry.if.end_crit_edge:                           ; preds = %entry
+  %.pre = load double, ptr @x, align 8
+  br label %if.end
+
+if.then:                                          ; preds = %entry
+  %0 = tail call fast double @llvm.sqrt.f64(double %a)
+  %div = fdiv fast double 1.000000e+00, %0
+  store double %div, ptr @x, align 8
+  br label %if.end
+
+if.end:                                           ; preds = %entry.if.end_crit_edge, %if.then
+  %1 = phi double [ %.pre, %entry.if.end_crit_edge ], [ %div, %if.then ]
+  %mul = fmul fast double %1, %1
+  store double %mul, ptr @r1, align 8
+  %mul1 = fmul fast double %1, %a
+  store double %mul1, ptr @r2, align 8
+  ret void
+}
+
+; r1 comes from different blocks. Don't optimize.
+define void @bb_constraint_case7(double %a, i32 %c, i32 %d) #0 {
+; CHECK-LABEL: define void @bb_constraint_case7(
+; CHECK-SAME: double [[A:%.*]], i32 [[C:%.*]], i32 [[D:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = tail call fast double @llvm.sqrt.f64(double [[A]])
+; CHECK-NEXT:    [[DIV:%.*]] = fdiv fast double 1.000000e+00, [[TMP0]]
+; CHECK-NEXT:    store double [[DIV]], ptr @x, align 8
+; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[C]], 0
+; CHECK-NEXT:    br i1 [[TOBOOL_NOT]], label [[IF_ELSE:%.*]], label [[IF_THEN:%.*]]
+; CHECK:       if.then:
+; CHECK-NEXT:    [[DIV1:%.*]] = fdiv fast double 3.000000e+00, [[A]]
+; CHECK-NEXT:    br label [[IF_END6:%.*]]
+; CHECK:       if.else:
+; CHECK-NEXT:    [[TOBOOL2_NOT:%.*]] = icmp eq i32 [[D]], 0
+; CHECK-NEXT:    br i1 [[TOBOOL2_NOT]], label [[IF_ELSE5:%.*]], label [[IF_THEN3:%.*]]
+; CHECK:       if.then3:
+; CHECK-NEXT:    [[DIV4:%.*]] = fdiv fast double 2.000000e+00, [[A]]
+; CHECK-NEXT:    br label [[IF_END6]]
+; CHECK:       if.else5:
+; CHECK-NEXT:    [[MUL:%.*]] = fmul fast double [[DIV]], [[DIV]]
+; CHECK-NEXT:    br label [[IF_END6]]
+; CHECK:       if.end6:
+; CHECK-NEXT:    [[DIV4_SINK:%.*]] = phi double [ [[DIV4]], [[IF_THEN3]] ], [ [[MUL]], [[IF_ELSE5]] ], [ [[DIV1]], [[IF_THEN]] ]
+; CHECK-NEXT:    store double [[DIV4_SINK]], ptr @r1, align 8
+; CHECK-NEXT:    [[MUL7:%.*]] = fdiv fast double [[A]], [[TMP0]]
+; CHECK-NEXT:    store double [[MUL7]], ptr @r2, align 8
+; CHECK-NEXT:    ret void
+;
+entry:
+  %0 = tail call fast double @llvm.sqrt.f64(double %a)
+  %div = fdiv fast double 1.000000e+00, %0
+  store double %div, ptr @x, align 8
+  %tobool.not = icmp eq i32 %c, 0
+  br i1 %tobool.not, label %if.else, label %if.then
+
+if.then:                                          ; preds = %entry
+  %div1 = fdiv fast double 3.000000e+00, %a
+  br label %if.end6
+
+if.else:                                          ; preds = %entry
+  %tobool2.not = icmp eq i32 %d, 0
+  br i1 %tobool2.not, label %if.else5, label %if.then3
+
+if.then3:                                         ; preds = %if.else
+  %div4 = fdiv fast double 2.000000e+00, %a
+  br label %if.end6
+
+if.else5:                                         ; preds = %if.else
+  %mul = fmul fast double %div, %div
+  br label %if.end6
+
+if.end6:                                          ; preds = %if.then3, %if.else5, %if.then
+  %div4.sink = phi double [ %div4, %if.then3 ], [ %mul, %if.else5 ], [ %div1, %if.then ]
+  store double %div4.sink, ptr @r1, align 8
+  %mul7 = fdiv fast double %a, %0
+  store double %mul7, ptr @r2, align 8
+  ret void
+}
+
+; r1 comes from two different blocks (as shown by the select instruction).
+define void @bb_constraint_case8(double %a, i32 %c) #0 {
+; CHECK-LABEL: define void @bb_constraint_case8(
+; CHECK-SAME: double [[A:%.*]], i32 [[C:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call fast double @llvm.sqrt.f64(double [[A]])
+; CHECK-NEXT:    [[TMP1:%.*]] = fdiv fast double 1.000000e+00, [[A]]
+; CHECK-NEXT:    [[DIV:%.*]] = fmul fast double [[TMP1]], [[TMP0]]
+; CHECK-NEXT:    store double [[DIV]], ptr @x, align 8
+; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[C]], 0
+; CHECK-NEXT:    [[MUL1:%.*]] = fmul fast double [[A]], [[A]]
+; CHECK-NEXT:    [[STOREMERGE:%.*]] = select i1 [[TOBOOL_NOT]], double [[MUL1]], double [[TMP1]]
+; CHECK-NEXT:    store double [[STOREMERGE]], ptr @r1, align 8
+; CHECK-NEXT:    store double [[TMP0]], ptr @r2, align 8
+; CHECK-NEXT:    ret void
+;
+entry:
+  %0 = call fast double @llvm.sqrt.f64(double %a)
+  %div = fdiv fast double 1.000000e+00, %0
+  store double %div, ptr @x, align 8
+  %tobool.not = icmp eq i32 %c, 0
+  %mul1 = fmul fast double %a, %a
+  %mul = fmul fast double %div, %div
+  %storemerge = select i1 %tobool.not, double %mul1, double %mul
+  store double %storemerge, ptr @r1, align 8
+  %mul2 = fmul fast double %div, %a
+  store double %mul2, ptr @r2, align 8
+  ret void
+}
+
+; multiple instances of multiply ops to optimize. Optimize all.
+define void @multiple_multiply_instances(double %a, i32 %c) #0 {
+; CHECK-LABEL: define void @multiple_multiply_instances(
+; CHECK-SAME: double [[A:%.*]], i32 [[C:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = tail call fast double @llvm.sqrt.f64(double [[A]])
+; CHECK-NEXT:    [[MUL:%.*]] = fdiv fast double 1.000000e+00, [[A]]
+; CHECK-NEXT:    [[DIV:%.*]] = fmul fast double [[MUL]], [[TMP0]]
+; CHECK-NEXT:    store double [[DIV]], ptr @x, align 8
+; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[C]], 0
+; CHECK-NEXT:    [[MUL2:%.*]] = fmul fast double [[A]], [[A]]
+; CHECK-NEXT:    [[MUL1:%.*]] = fmul fast double [[A]], [[A]]
+; CHECK-NEXT:    [[MUL_SINK:%.*]] = select i1 [[TOBOOL_NOT]], double [[MUL2]], double [[MUL]]
+; CHECK-NEXT:    [[STOREMERGE:%.*]] = select i1 [[TOBOOL_NOT]], double [[MUL]], double [[MUL1]]
+; CHECK-NEXT:    store double [[MUL_SINK]], ptr @r1, align 8
+; CHECK-NEXT:    store double [[STOREMERGE]], ptr @r3, align 8
+; CHECK-NEXT:    store double [[TMP0]], ptr @r2, align 8
+; CHECK-NEXT:    ret void
+;
+entry:
+  %0 = tail call fast double @llvm.sqrt.f64(double %a)
+  %div = fdiv fast double 1.000000e+00, %0
+  store double %div, ptr @x, align 8
+  %tobool.not = icmp eq i32 %c, 0
+  %mul2 = fmul fast double %a, %a
+  %mul3 = fmul fast double %div, %div
+  %mul = fmul fast double %div, %div
+  %mul1 = fmul fast double %a, %a
+  %mul.sink = select i1 %tobool.not, double %mul2, double %mul
+  %storemerge = select i1 %tobool.not, double %mul3, double %mul1
+  store double %mul.sink, ptr @r1, align 8
+  store double %storemerge, ptr @r3, align 8
+  %mul4 = fmul fast double %a, %div
+  store double %mul4, ptr @r2, align 8
+  ret void
+}
+
----------------
arsenm wrote:

Whether vectorization happens or not does not matter; the code should handle vectors regardless. You would need to write the strictfp test in terms of the constrained intrinsics, but it would still be good to have the negative test just in case somebody had the idea to make the matchers cover those too.
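
A rough sketch of such a negative test (the function name is illustrative, not the exact test): the same 1/sqrt pattern written with the constrained intrinsics under strictfp, where the CHECK lines would assert the fdiv of the sqrt result is left untouched:

  define double @strictfp_rsqrt_no_transform(double %a) #0 {
    %s = call double @llvm.experimental.constrained.sqrt.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
    %div = call double @llvm.experimental.constrained.fdiv.f64(double 1.000000e+00, double %s, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
    %mul = call double @llvm.experimental.constrained.fmul.f64(double %div, double %div, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
    ret double %mul
  }

  declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata)
  declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata)
  declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata)

  attributes #0 = { strictfp }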

https://github.com/llvm/llvm-project/pull/87474

