[llvm-branch-commits] [llvm] c6035a7 - Remove functions from *-inseltpoison.ll tests if unnecessary

Juneyoung Lee via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Wed Dec 30 06:55:42 PST 2020


Author: Juneyoung Lee
Date: 2020-12-30T23:50:37+09:00
New Revision: c6035a7bdf207dc3c1e12c041d5658770893fc68

URL: https://github.com/llvm/llvm-project/commit/c6035a7bdf207dc3c1e12c041d5658770893fc68
DIFF: https://github.com/llvm/llvm-project/commit/c6035a7bdf207dc3c1e12c041d5658770893fc68.diff

LOG: Remove functions from *-inseltpoison.ll tests if unnecessary

X-inseltpoison.ll is a copy of X.ll with the undef placeholder operands of
insertelement/shufflevector replaced with poison.
This commit removes a few redundant functions that do not contain any
insertelement/shufflevector instructions.

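For context, the rewrite these -inseltpoison.ll copies apply looks roughly like
the following sketch (not taken from the files touched by this commit; the
function names @splat_undef/@splat_poison are made up for illustration): the
undef placeholder operand of an insertelement, and the unused second vector
operand of a shufflevector, are replaced with poison, while everything else is
left unchanged.

    ; Pattern as it appears in X.ll: a scalar splat built by inserting into
    ; an undef vector and shuffling with an undef second operand.
    define <4 x i32> @splat_undef(i32 %x) {
      %ins = insertelement <4 x i32> undef, i32 %x, i32 0
      %splat = shufflevector <4 x i32> %ins, <4 x i32> undef, <4 x i32> zeroinitializer
      ret <4 x i32> %splat
    }

    ; Corresponding pattern in X-inseltpoison.ll: the undef placeholders
    ; become poison.
    define <4 x i32> @splat_poison(i32 %x) {
      %ins = insertelement <4 x i32> poison, i32 %x, i32 0
      %splat = shufflevector <4 x i32> %ins, <4 x i32> poison, <4 x i32> zeroinitializer
      ret <4 x i32> %splat
    }

Functions that never contain such an insertelement/shufflevector are identical
in both copies, which is why they are dropped from the -inseltpoison.ll files
below.
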
Added: 
    

Modified: 
    llvm/test/Transforms/InstCombine/assume-inseltpoison.ll
    llvm/test/Transforms/InstCombine/bswap-inseltpoison.ll
    llvm/test/Transforms/InstCombine/fmul-inseltpoison.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/Transforms/InstCombine/assume-inseltpoison.ll b/llvm/test/Transforms/InstCombine/assume-inseltpoison.ll
index 8c04c4af28ced..f49755570ddc9 100644
--- a/llvm/test/Transforms/InstCombine/assume-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/assume-inseltpoison.ll
@@ -4,368 +4,10 @@
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
+; A copy of assume.ll, with undef at insertelement/shufflevector replaced with
+; poison.
 declare void @llvm.assume(i1) #1
 
-; Check that the alignment has been upgraded and that the assume has not
-; been removed:
-
-define i32 @foo1(i32* %a) #0 {
-; CHECK-LABEL: @foo1(
-; CHECK-NEXT:    [[T0:%.*]] = load i32, i32* [[A:%.*]], align 32
-; CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
-; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
-; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
-; CHECK-NEXT:    ret i32 [[T0]]
-;
-  %t0 = load i32, i32* %a, align 4
-  %ptrint = ptrtoint i32* %a to i64
-  %maskedptr = and i64 %ptrint, 31
-  %maskcond = icmp eq i64 %maskedptr, 0
-  tail call void @llvm.assume(i1 %maskcond)
-  ret i32 %t0
-}
-
-; Same check as in @foo1, but make sure it works if the assume is first too.
-
-define i32 @foo2(i32* %a) #0 {
-; CHECK-LABEL: @foo2(
-; CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i32* [[A:%.*]] to i64
-; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
-; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
-; CHECK-NEXT:    [[T0:%.*]] = load i32, i32* [[A]], align 32
-; CHECK-NEXT:    ret i32 [[T0]]
-;
-  %ptrint = ptrtoint i32* %a to i64
-  %maskedptr = and i64 %ptrint, 31
-  %maskcond = icmp eq i64 %maskedptr, 0
-  tail call void @llvm.assume(i1 %maskcond)
-  %t0 = load i32, i32* %a, align 4
-  ret i32 %t0
-}
-
-define i32 @simple(i32 %a) #1 {
-; CHECK-LABEL: @simple(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], 4
-; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT:    ret i32 4
-;
-  %cmp = icmp eq i32 %a, 4
-  tail call void @llvm.assume(i1 %cmp)
-  ret i32 %a
-}
-
-define i32 @can1(i1 %a, i1 %b, i1 %c) {
-; CHECK-LABEL: @can1(
-; CHECK-NEXT:    call void @llvm.assume(i1 [[A:%.*]])
-; CHECK-NEXT:    call void @llvm.assume(i1 [[B:%.*]])
-; CHECK-NEXT:    call void @llvm.assume(i1 [[C:%.*]])
-; CHECK-NEXT:    ret i32 5
-;
-  %and1 = and i1 %a, %b
-  %and  = and i1 %and1, %c
-  tail call void @llvm.assume(i1 %and)
-  ret i32 5
-}
-
-define i32 @can2(i1 %a, i1 %b, i1 %c) {
-; CHECK-LABEL: @can2(
-; CHECK-NEXT:    [[TMP1:%.*]] = xor i1 [[A:%.*]], true
-; CHECK-NEXT:    call void @llvm.assume(i1 [[TMP1]])
-; CHECK-NEXT:    [[TMP2:%.*]] = xor i1 [[B:%.*]], true
-; CHECK-NEXT:    call void @llvm.assume(i1 [[TMP2]])
-; CHECK-NEXT:    ret i32 5
-;
-  %v = or i1 %a, %b
-  %w = xor i1 %v, 1
-  tail call void @llvm.assume(i1 %w)
-  ret i32 5
-}
-
-define i32 @bar1(i32 %a) #0 {
-; CHECK-LABEL: @bar1(
-; CHECK-NEXT:    [[AND:%.*]] = and i32 [[A:%.*]], 7
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 1
-; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT:    ret i32 1
-;
-  %and1 = and i32 %a, 3
-  %and = and i32 %a, 7
-  %cmp = icmp eq i32 %and, 1
-  tail call void @llvm.assume(i1 %cmp)
-  ret i32 %and1
-}
-
-define i32 @bar2(i32 %a) #0 {
-; CHECK-LABEL: @bar2(
-; CHECK-NEXT:    [[AND:%.*]] = and i32 [[A:%.*]], 7
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 1
-; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT:    ret i32 1
-;
-  %and = and i32 %a, 7
-  %cmp = icmp eq i32 %and, 1
-  tail call void @llvm.assume(i1 %cmp)
-  %and1 = and i32 %a, 3
-  ret i32 %and1
-}
-
-define i32 @bar3(i32 %a, i1 %x, i1 %y) #0 {
-; CHECK-LABEL: @bar3(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    tail call void @llvm.assume(i1 [[X:%.*]])
-; CHECK-NEXT:    [[AND:%.*]] = and i32 [[A:%.*]], 7
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 1
-; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT:    tail call void @llvm.assume(i1 [[Y:%.*]])
-; CHECK-NEXT:    ret i32 1
-;
-entry:
-  %and1 = and i32 %a, 3
-
-; Don't be fooled by other assumes around.
-
-  tail call void @llvm.assume(i1 %x)
-
-  %and = and i32 %a, 7
-  %cmp = icmp eq i32 %and, 1
-  tail call void @llvm.assume(i1 %cmp)
-
-  tail call void @llvm.assume(i1 %y)
-
-  ret i32 %and1
-}
-
-define i32 @bar4(i32 %a, i32 %b) {
-; CHECK-LABEL: @bar4(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[AND:%.*]] = and i32 [[A:%.*]], 7
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 1
-; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[A]], [[B:%.*]]
-; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP2]])
-; CHECK-NEXT:    ret i32 1
-;
-entry:
-  %and1 = and i32 %b, 3
-  %and = and i32 %a, 7
-  %cmp = icmp eq i32 %and, 1
-  tail call void @llvm.assume(i1 %cmp)
-  %cmp2 = icmp eq i32 %a, %b
-  tail call void @llvm.assume(i1 %cmp2)
-  ret i32 %and1
-}
-
-define i32 @icmp1(i32 %a) #0 {
-; CHECK-LABEL: @icmp1(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[A:%.*]], 5
-; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT:    ret i32 1
-;
-  %cmp = icmp sgt i32 %a, 5
-  tail call void @llvm.assume(i1 %cmp)
-  %conv = zext i1 %cmp to i32
-  ret i32 %conv
-}
-
-define i32 @icmp2(i32 %a) #0 {
-; CHECK-LABEL: @icmp2(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[A:%.*]], 5
-; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT:    ret i32 0
-;
-  %cmp = icmp sgt i32 %a, 5
-  tail call void @llvm.assume(i1 %cmp)
-  %t0 = zext i1 %cmp to i32
-  %lnot.ext = xor i32 %t0, 1
-  ret i32 %lnot.ext
-}
-
-; If the 'not' of a condition is known true, then the condition must be false.
-
-define i1 @assume_not(i1 %cond) {
-; CHECK-LABEL: @assume_not(
-; CHECK-NEXT:    [[NOTCOND:%.*]] = xor i1 [[COND:%.*]], true
-; CHECK-NEXT:    call void @llvm.assume(i1 [[NOTCOND]])
-; CHECK-NEXT:    ret i1 false
-;
-  %notcond = xor i1 %cond, true
-  call void @llvm.assume(i1 %notcond)
-  ret i1 %cond
-}
-
-declare void @escape(i32* %a)
-
-; Canonicalize a nonnull assumption on a load into metadata form.
-
-define i32 @bundle1(i32* %P) {
-; CHECK-LABEL: @bundle1(
-; CHECK-NEXT:    tail call void @llvm.assume(i1 true) [ "nonnull"(i32* [[P:%.*]]) ]
-; CHECK-NEXT:    [[LOAD:%.*]] = load i32, i32* [[P]], align 4
-; CHECK-NEXT:    ret i32 [[LOAD]]
-;
-  tail call void @llvm.assume(i1 true) ["nonnull"(i32* %P)]
-  %load = load i32, i32* %P
-  ret i32 %load
-}
-
-define i32 @bundle2(i32* %P) {
-; CHECK-LABEL: @bundle2(
-; CHECK-NEXT:    [[LOAD:%.*]] = load i32, i32* [[P:%.*]], align 4
-; CHECK-NEXT:    ret i32 [[LOAD]]
-;
-  tail call void @llvm.assume(i1 true) ["ignore"(i32* undef)]
-  %load = load i32, i32* %P
-  ret i32 %load
-}
-
-define i1 @nonnull1(i32** %a) {
-; CHECK-LABEL: @nonnull1(
-; CHECK-NEXT:    [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8, !nonnull !6
-; CHECK-NEXT:    tail call void @escape(i32* nonnull [[LOAD]])
-; CHECK-NEXT:    ret i1 false
-;
-  %load = load i32*, i32** %a
-  %cmp = icmp ne i32* %load, null
-  tail call void @llvm.assume(i1 %cmp)
-  tail call void @escape(i32* %load)
-  %rval = icmp eq i32* %load, null
-  ret i1 %rval
-}
-
-; Make sure the above canonicalization applies only
-; to pointer types.  Doing otherwise would be illegal.
-
-define i1 @nonnull2(i32* %a) {
-; CHECK-LABEL: @nonnull2(
-; CHECK-NEXT:    [[LOAD:%.*]] = load i32, i32* [[A:%.*]], align 4
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[LOAD]], 0
-; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT:    ret i1 false
-;
-  %load = load i32, i32* %a
-  %cmp = icmp ne i32 %load, 0
-  tail call void @llvm.assume(i1 %cmp)
-  %rval = icmp eq i32 %load, 0
-  ret i1 %rval
-}
-
-; Make sure the above canonicalization does not trigger
-; if the assume is control dependent on something else
-
-define i1 @nonnull3(i32** %a, i1 %control) {
-; CHECK-LABEL: @nonnull3(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32* [[LOAD]], null
-; CHECK-NEXT:    br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
-; CHECK:       taken:
-; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT:    ret i1 false
-; CHECK:       not_taken:
-; CHECK-NEXT:    [[RVAL_2:%.*]] = icmp sgt i32* [[LOAD]], null
-; CHECK-NEXT:    ret i1 [[RVAL_2]]
-;
-entry:
-  %load = load i32*, i32** %a
-  %cmp = icmp ne i32* %load, null
-  br i1 %control, label %taken, label %not_taken
-taken:
-  tail call void @llvm.assume(i1 %cmp)
-  %rval = icmp eq i32* %load, null
-  ret i1 %rval
-not_taken:
-  %rval.2 = icmp sgt i32* %load, null
-  ret i1 %rval.2
-}
-
-; Make sure the above canonicalization does not trigger
-; if the path from the load to the assume is potentially
-; interrupted by an exception being thrown
-
-define i1 @nonnull4(i32** %a) {
-; CHECK-LABEL: @nonnull4(
-; CHECK-NEXT:    [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8
-; CHECK-NEXT:    tail call void @escape(i32* [[LOAD]])
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32* [[LOAD]], null
-; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT:    ret i1 false
-;
-  %load = load i32*, i32** %a
-  ;; This call may throw!
-  tail call void @escape(i32* %load)
-  %cmp = icmp ne i32* %load, null
-  tail call void @llvm.assume(i1 %cmp)
-  %rval = icmp eq i32* %load, null
-  ret i1 %rval
-}
-define i1 @nonnull5(i32** %a) {
-; CHECK-LABEL: @nonnull5(
-; CHECK-NEXT:    [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8
-; CHECK-NEXT:    tail call void @escape(i32* [[LOAD]])
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32* [[LOAD]], null
-; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT:    ret i1 false
-;
-  %load = load i32*, i32** %a
-  ;; This call may throw!
-  tail call void @escape(i32* %load)
-  %integral = ptrtoint i32* %load to i64
-  %cmp = icmp slt i64 %integral, 0
-  tail call void @llvm.assume(i1 %cmp) ; %load has at least highest bit set
-  %rval = icmp eq i32* %load, null
-  ret i1 %rval
-}
-
-; PR35846 - https://bugs.llvm.org/show_bug.cgi?id=35846
-
-define i32 @assumption_conflicts_with_known_bits(i32 %a, i32 %b) {
-; CHECK-LABEL: @assumption_conflicts_with_known_bits(
-; CHECK-NEXT:    [[AND1:%.*]] = and i32 [[B:%.*]], 3
-; CHECK-NEXT:    tail call void @llvm.assume(i1 false)
-; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[AND1]], 0
-; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP2]])
-; CHECK-NEXT:    ret i32 0
-;
-  %and1 = and i32 %b, 3
-  %B1 = lshr i32 %and1, %and1
-  %B3 = shl nuw nsw i32 %and1, %B1
-  %cmp = icmp eq i32 %B3, 1
-  tail call void @llvm.assume(i1 %cmp)
-  %cmp2 = icmp eq i32 %B1, %B3
-  tail call void @llvm.assume(i1 %cmp2)
-  ret i32 %and1
-}
-
-; PR37726 - https://bugs.llvm.org/show_bug.cgi?id=37726
-; There's a loophole in eliminating a redundant assumption when
-; we have conflicting assumptions. Verify that debuginfo doesn't
-; get in the way of the fold.
-
-define void @debug_interference(i8 %x) {
-; CHECK-LABEL: @debug_interference(
-; CHECK-NEXT:    [[CMP2:%.*]] = icmp ne i8 [[X:%.*]], 0
-; CHECK-NEXT:    tail call void @llvm.assume(i1 false)
-; CHECK-NEXT:    tail call void @llvm.dbg.value(metadata i32 5, [[META7:metadata !.*]], metadata !DIExpression()), [[DBG9:!dbg !.*]]
-; CHECK-NEXT:    tail call void @llvm.assume(i1 false)
-; CHECK-NEXT:    tail call void @llvm.dbg.value(metadata i32 5, [[META7]], metadata !DIExpression()), [[DBG9]]
-; CHECK-NEXT:    tail call void @llvm.dbg.value(metadata i32 5, [[META7]], metadata !DIExpression()), [[DBG9]]
-; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP2]])
-; CHECK-NEXT:    ret void
-;
-  %cmp1 = icmp eq i8 %x, 0
-  %cmp2 = icmp ne i8 %x, 0
-  tail call void @llvm.assume(i1 %cmp1)
-  tail call void @llvm.dbg.value(metadata i32 5, metadata !1, metadata !DIExpression()), !dbg !9
-  tail call void @llvm.assume(i1 %cmp1)
-  tail call void @llvm.dbg.value(metadata i32 5, metadata !1, metadata !DIExpression()), !dbg !9
-  tail call void @llvm.assume(i1 %cmp2)
-  tail call void @llvm.dbg.value(metadata i32 5, metadata !1, metadata !DIExpression()), !dbg !9
-  tail call void @llvm.assume(i1 %cmp2)
-  ret void
-}
-
 ; This would crash.
 ; Does it ever make sense to peek through a bitcast of the icmp operand?
 
@@ -383,274 +25,3 @@ define i32 @PR40940(<4 x i8> %x) {
   call void @llvm.assume(i1 %t3)
   ret i32 %t2
 }
-
-define i1 @nonnull3A(i32** %a, i1 %control) {
-; CHECK-LABEL: @nonnull3A(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8
-; CHECK-NEXT:    br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
-; CHECK:       taken:
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32* [[LOAD]], null
-; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT:    ret i1 true
-; CHECK:       not_taken:
-; CHECK-NEXT:    [[RVAL_2:%.*]] = icmp sgt i32* [[LOAD]], null
-; CHECK-NEXT:    ret i1 [[RVAL_2]]
-;
-entry:
-  %load = load i32*, i32** %a
-  %cmp = icmp ne i32* %load, null
-  br i1 %control, label %taken, label %not_taken
-taken:
-  call void @llvm.assume(i1 %cmp)
-  ret i1 %cmp
-not_taken:
-  call void @llvm.assume(i1 %cmp)
-  %rval.2 = icmp sgt i32* %load, null
-  ret i1 %rval.2
-}
-
-define i1 @nonnull3B(i32** %a, i1 %control) {
-; CHECK-LABEL: @nonnull3B(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
-; CHECK:       taken:
-; CHECK-NEXT:    [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32* [[LOAD]], null
-; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]]) [ "nonnull"(i32* [[LOAD]]), "nonnull"(i1 [[CMP]]) ]
-; CHECK-NEXT:    ret i1 true
-; CHECK:       not_taken:
-; CHECK-NEXT:    ret i1 [[CONTROL]]
-;
-entry:
-  %load = load i32*, i32** %a
-  %cmp = icmp ne i32* %load, null
-  br i1 %control, label %taken, label %not_taken
-taken:
-  call void @llvm.assume(i1 %cmp) ["nonnull"(i32* %load), "nonnull"(i1 %cmp)]
-  ret i1 %cmp
-not_taken:
-  call void @llvm.assume(i1 %cmp) ["nonnull"(i32* %load), "nonnull"(i1 %cmp)]
-  ret i1 %control
-}
-
-declare i1 @tmp1(i1)
-
-define i1 @nonnull3C(i32** %a, i1 %control) {
-; CHECK-LABEL: @nonnull3C(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
-; CHECK:       taken:
-; CHECK-NEXT:    [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32* [[LOAD]], null
-; CHECK-NEXT:    [[CMP2:%.*]] = call i1 @tmp1(i1 [[CMP]])
-; CHECK-NEXT:    br label [[EXIT:%.*]]
-; CHECK:       exit:
-; CHECK-NEXT:    ret i1 [[CMP2]]
-; CHECK:       not_taken:
-; CHECK-NEXT:    ret i1 [[CONTROL]]
-;
-entry:
-  %load = load i32*, i32** %a
-  %cmp = icmp ne i32* %load, null
-  br i1 %control, label %taken, label %not_taken
-taken:
-  %cmp2 = call i1 @tmp1(i1 %cmp)
-  br label %exit
-exit:
-  ; FIXME: this shouldn't be dropped because it is still dominated by the new position of %load
-  call void @llvm.assume(i1 %cmp) ["nonnull"(i32* %load), "nonnull"(i1 %cmp)]
-  ret i1 %cmp2
-not_taken:
-  call void @llvm.assume(i1 %cmp)
-  ret i1 %control
-}
-
-define i1 @nonnull3D(i32** %a, i1 %control) {
-; CHECK-LABEL: @nonnull3D(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
-; CHECK:       taken:
-; CHECK-NEXT:    [[LOAD:%.*]] = load i32*, i32** [[A:%.*]], align 8
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32* [[LOAD]], null
-; CHECK-NEXT:    [[CMP2:%.*]] = call i1 @tmp1(i1 [[CMP]])
-; CHECK-NEXT:    br label [[EXIT:%.*]]
-; CHECK:       exit:
-; CHECK-NEXT:    ret i1 [[CMP2]]
-; CHECK:       not_taken:
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "ignore"(i32* undef), "ignore"(i1 undef), "nonnull"(i1 [[CONTROL]]) ]
-; CHECK-NEXT:    ret i1 [[CONTROL]]
-;
-entry:
-  %load = load i32*, i32** %a
-  %cmp = icmp ne i32* %load, null
-  br i1 %control, label %taken, label %not_taken
-taken:
-  %cmp2 = call i1 @tmp1(i1 %cmp)
-  br label %exit
-exit:
-  ret i1 %cmp2
-not_taken:
-  call void @llvm.assume(i1 %cmp) ["nonnull"(i32* %load), "nonnull"(i1 %cmp), "nonnull"(i1 %control)]
-  ret i1 %control
-}
-
-
-define void @always_true_assumption() {
-; CHECK-LABEL: @always_true_assumption(
-; CHECK-NEXT:    ret void
-;
-  call void @llvm.assume(i1 true)
-  ret void
-}
-
-; The alloca guarantees that the low bits of %a are zero because of alignment.
-; The assume says the opposite. Make sure we don't crash.
-
-define i64 @PR31809() {
-; CHECK-LABEL: @PR31809(
-; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[T1:%.*]] = ptrtoint i32* [[A]] to i64
-; CHECK-NEXT:    call void @llvm.assume(i1 false)
-; CHECK-NEXT:    ret i64 [[T1]]
-;
-  %a = alloca i32
-  %t1 = ptrtoint i32* %a to i64
-  %cond = icmp eq i64 %t1, 3
-  call void @llvm.assume(i1 %cond)
-  ret i64 %t1
-}
-
-; Similar to above: there's no way to know which assumption is truthful,
-; so just don't crash.
-
-define i8 @conflicting_assumptions(i8 %x){
-; CHECK-LABEL: @conflicting_assumptions(
-; CHECK-NEXT:    call void @llvm.assume(i1 false)
-; CHECK-NEXT:    [[COND2:%.*]] = icmp eq i8 [[X:%.*]], 4
-; CHECK-NEXT:    call void @llvm.assume(i1 [[COND2]])
-; CHECK-NEXT:    ret i8 5
-;
-  %add = add i8 %x, 1
-  %cond1 = icmp eq i8 %x, 3
-  call void @llvm.assume(i1 %cond1)
-  %cond2 = icmp eq i8 %x, 4
-  call void @llvm.assume(i1 %cond2)
-  ret i8 %add
-}
-
-; Another case of conflicting assumptions. This would crash because we'd
-; try to set more known bits than existed in the known bits struct.
-
-define void @PR36270(i32 %b) {
-; CHECK-LABEL: @PR36270(
-; CHECK-NEXT:    unreachable
-;
-  %B7 = xor i32 -1, 2147483647
-  %and1 = and i32 %b, 3
-  %B12 = lshr i32 %B7, %and1
-  %C1 = icmp ult i32 %and1, %B12
-  tail call void @llvm.assume(i1 %C1)
-  %cmp2 = icmp eq i32 0, %B12
-  tail call void @llvm.assume(i1 %cmp2)
-  unreachable
-}
-
-; PR47416
-
-define i32 @unreachable_assume(i32 %x, i32 %y) {
-; CHECK-LABEL: @unreachable_assume(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[CMP0:%.*]] = icmp sgt i32 [[X:%.*]], 1
-; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i32 [[Y:%.*]], 1
-; CHECK-NEXT:    [[OR:%.*]] = or i1 [[CMP0]], [[CMP1]]
-; CHECK-NEXT:    tail call void @llvm.assume(i1 [[OR]])
-; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[X]], 1
-; CHECK-NEXT:    br i1 [[CMP2]], label [[IF:%.*]], label [[EXIT:%.*]]
-; CHECK:       if:
-; CHECK-NEXT:    [[A:%.*]] = and i32 [[Y]], -2
-; CHECK-NEXT:    [[CMP3:%.*]] = icmp ne i32 [[A]], 104
-; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP3]])
-; CHECK-NEXT:    br label [[EXIT]]
-; CHECK:       exit:
-; CHECK-NEXT:    unreachable
-;
-entry:
-  %cmp0 = icmp sgt i32 %x, 1
-  %cmp1 = icmp eq i32 %y, 1
-  %or = or i1 %cmp0, %cmp1
-  tail call void @llvm.assume(i1 %or)
-  %cmp2 = icmp eq i32 %x, 1
-  br i1 %cmp2, label %if, label %exit
-
-if:
-  %a = and i32 %y, -2
-  %cmp3 = icmp ne i32 %a, 104
-  tail call void @llvm.assume(i1 %cmp3)
-  br label %exit
-
-exit:
-  %cmp4 = icmp eq i32 %x, 2
-  tail call void @llvm.assume(i1 %cmp4)
-  unreachable
-}
-
-define i32 @unreachable_assumes_and_store(i32 %x, i32 %y, i32* %p) {
-; CHECK-LABEL: @unreachable_assumes_and_store(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[CMP0:%.*]] = icmp sgt i32 [[X:%.*]], 1
-; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i32 [[Y:%.*]], 1
-; CHECK-NEXT:    [[OR:%.*]] = or i1 [[CMP0]], [[CMP1]]
-; CHECK-NEXT:    tail call void @llvm.assume(i1 [[OR]])
-; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[X]], 1
-; CHECK-NEXT:    br i1 [[CMP2]], label [[IF:%.*]], label [[EXIT:%.*]]
-; CHECK:       if:
-; CHECK-NEXT:    [[A:%.*]] = and i32 [[Y]], -2
-; CHECK-NEXT:    [[CMP3:%.*]] = icmp ne i32 [[A]], 104
-; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP3]])
-; CHECK-NEXT:    br label [[EXIT]]
-; CHECK:       exit:
-; CHECK-NEXT:    unreachable
-;
-entry:
-  %cmp0 = icmp sgt i32 %x, 1
-  %cmp1 = icmp eq i32 %y, 1
-  %or = or i1 %cmp0, %cmp1
-  tail call void @llvm.assume(i1 %or)
-  %cmp2 = icmp eq i32 %x, 1
-  br i1 %cmp2, label %if, label %exit
-
-if:
-  %a = and i32 %y, -2
-  %cmp3 = icmp ne i32 %a, 104
-  tail call void @llvm.assume(i1 %cmp3)
-  br label %exit
-
-exit:
-  %cmp4 = icmp eq i32 %x, 2
-  tail call void @llvm.assume(i1 %cmp4)
-  %cmp5 = icmp ugt i32 %y, 42
-  tail call void @llvm.assume(i1 %cmp5)
-  store i32 %x, i32* %p
-  unreachable
-}
-
-declare void @llvm.dbg.value(metadata, metadata, metadata)
-
-!llvm.dbg.cu = !{!0}
-!llvm.module.flags = !{!5, !6, !7, !8}
-
-!0 = distinct !DICompileUnit(language: DW_LANG_C, file: !3, producer: "Me", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: null, retainedTypes: null, imports: null)
-!1 = !DILocalVariable(name: "", arg: 1, scope: !2, file: null, line: 1, type: null)
-!2 = distinct !DISubprogram(name: "debug", linkageName: "debug", scope: null, file: null, line: 0, type: null, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0)
-!3 = !DIFile(filename: "consecutive-fences.ll", directory: "")
-!5 = !{i32 2, !"Dwarf Version", i32 4}
-!6 = !{i32 2, !"Debug Info Version", i32 3}
-!7 = !{i32 1, !"wchar_size", i32 4}
-!8 = !{i32 7, !"PIC Level", i32 2}
-!9 = !DILocation(line: 0, column: 0, scope: !2)
-
-
-attributes #0 = { nounwind uwtable }
-attributes #1 = { nounwind }
-

diff  --git a/llvm/test/Transforms/InstCombine/bswap-inseltpoison.ll b/llvm/test/Transforms/InstCombine/bswap-inseltpoison.ll
index 3730496ba6e33..7d5bcbed1447d 100644
--- a/llvm/test/Transforms/InstCombine/bswap-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/bswap-inseltpoison.ll
@@ -3,629 +3,8 @@
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
 
-define i32 @test1(i32 %i) {
-; CHECK-LABEL: @test1(
-; CHECK-NEXT:    [[T12:%.*]] = call i32 @llvm.bswap.i32(i32 [[I:%.*]])
-; CHECK-NEXT:    ret i32 [[T12]]
-;
-  %t1 = lshr i32 %i, 24
-  %t3 = lshr i32 %i, 8
-  %t4 = and i32 %t3, 65280
-  %t5 = or i32 %t1, %t4
-  %t7 = shl i32 %i, 8
-  %t8 = and i32 %t7, 16711680
-  %t9 = or i32 %t5, %t8
-  %t11 = shl i32 %i, 24
-  %t12 = or i32 %t9, %t11
-  ret i32 %t12
-}
-
-define <2 x i32> @test1_vector(<2 x i32> %i) {
-; CHECK-LABEL: @test1_vector(
-; CHECK-NEXT:    [[T12:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[I:%.*]])
-; CHECK-NEXT:    ret <2 x i32> [[T12]]
-;
-  %t1 = lshr <2 x i32> %i, <i32 24, i32 24>
-  %t3 = lshr <2 x i32> %i, <i32 8, i32 8>
-  %t4 = and <2 x i32> %t3, <i32 65280, i32 65280>
-  %t5 = or <2 x i32> %t1, %t4
-  %t7 = shl <2 x i32> %i, <i32 8, i32 8>
-  %t8 = and <2 x i32> %t7, <i32 16711680, i32 16711680>
-  %t9 = or <2 x i32> %t5, %t8
-  %t11 = shl <2 x i32> %i, <i32 24, i32 24>
-  %t12 = or <2 x i32> %t9, %t11
-  ret <2 x i32> %t12
-}
-
-define i32 @test2(i32 %arg) {
-; CHECK-LABEL: @test2(
-; CHECK-NEXT:    [[T14:%.*]] = call i32 @llvm.bswap.i32(i32 [[ARG:%.*]])
-; CHECK-NEXT:    ret i32 [[T14]]
-;
-  %t2 = shl i32 %arg, 24
-  %t4 = shl i32 %arg, 8
-  %t5 = and i32 %t4, 16711680
-  %t6 = or i32 %t2, %t5
-  %t8 = lshr i32 %arg, 8
-  %t9 = and i32 %t8, 65280
-  %t10 = or i32 %t6, %t9
-  %t12 = lshr i32 %arg, 24
-  %t14 = or i32 %t10, %t12
-  ret i32 %t14
-}
-
-define <2 x i32> @test2_vector(<2 x i32> %arg) {
-; CHECK-LABEL: @test2_vector(
-; CHECK-NEXT:    [[T14:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[ARG:%.*]])
-; CHECK-NEXT:    ret <2 x i32> [[T14]]
-;
-  %t2 = shl <2 x i32> %arg, <i32 24, i32 24>
-  %t4 = shl <2 x i32> %arg, <i32 8, i32 8>
-  %t5 = and <2 x i32> %t4, <i32 16711680, i32 16711680>
-  %t6 = or <2 x i32> %t2, %t5
-  %t8 = lshr <2 x i32> %arg, <i32 8, i32 8>
-  %t9 = and <2 x i32> %t8, <i32 65280, i32 65280>
-  %t10 = or <2 x i32> %t6, %t9
-  %t12 = lshr <2 x i32> %arg, <i32 24, i32 24>
-  %t14 = or <2 x i32> %t10, %t12
-  ret <2 x i32> %t14
-}
-
-define <2 x i32> @test2_vector_undef(<2 x i32> %arg) {
-; CHECK-LABEL: @test2_vector_undef(
-; CHECK-NEXT:    [[T2:%.*]] = shl <2 x i32> [[ARG:%.*]], <i32 24, i32 undef>
-; CHECK-NEXT:    [[T4:%.*]] = shl <2 x i32> [[ARG]], <i32 8, i32 8>
-; CHECK-NEXT:    [[T5:%.*]] = and <2 x i32> [[T4]], <i32 16711680, i32 undef>
-; CHECK-NEXT:    [[T6:%.*]] = or <2 x i32> [[T2]], [[T5]]
-; CHECK-NEXT:    [[T8:%.*]] = lshr <2 x i32> [[ARG]], <i32 8, i32 8>
-; CHECK-NEXT:    [[T9:%.*]] = and <2 x i32> [[T8]], <i32 65280, i32 undef>
-; CHECK-NEXT:    [[T10:%.*]] = or <2 x i32> [[T6]], [[T9]]
-; CHECK-NEXT:    [[T12:%.*]] = lshr <2 x i32> [[ARG]], <i32 24, i32 undef>
-; CHECK-NEXT:    [[T14:%.*]] = or <2 x i32> [[T10]], [[T12]]
-; CHECK-NEXT:    ret <2 x i32> [[T14]]
-;
-  %t2 = shl <2 x i32> %arg, <i32 24, i32 undef>
-  %t4 = shl <2 x i32> %arg, <i32 8, i32 8>
-  %t5 = and <2 x i32> %t4, <i32 16711680, i32 undef>
-  %t6 = or <2 x i32> %t2, %t5
-  %t8 = lshr <2 x i32> %arg, <i32 8, i32 8>
-  %t9 = and <2 x i32> %t8, <i32 65280, i32 undef>
-  %t10 = or <2 x i32> %t6, %t9
-  %t12 = lshr <2 x i32> %arg, <i32 24, i32 undef>
-  %t14 = or <2 x i32> %t10, %t12
-  ret <2 x i32> %t14
-}
-
-define i16 @test3(i16 %s) {
-; CHECK-LABEL: @test3(
-; CHECK-NEXT:    [[T5:%.*]] = call i16 @llvm.bswap.i16(i16 [[S:%.*]])
-; CHECK-NEXT:    ret i16 [[T5]]
-;
-  %t2 = lshr i16 %s, 8
-  %t4 = shl i16 %s, 8
-  %t5 = or i16 %t2, %t4
-  ret i16 %t5
-}
-
-define <2 x i16> @test3_vector(<2 x i16> %s) {
-; CHECK-LABEL: @test3_vector(
-; CHECK-NEXT:    [[T5:%.*]] = call <2 x i16> @llvm.bswap.v2i16(<2 x i16> [[S:%.*]])
-; CHECK-NEXT:    ret <2 x i16> [[T5]]
-;
-  %t2 = lshr <2 x i16> %s, <i16 8, i16 8>
-  %t4 = shl <2 x i16> %s, <i16 8, i16 8>
-  %t5 = or <2 x i16> %t2, %t4
-  ret <2 x i16> %t5
-}
-
-define <2 x i16> @test3_vector_undef(<2 x i16> %s) {
-; CHECK-LABEL: @test3_vector_undef(
-; CHECK-NEXT:    [[T5:%.*]] = call <2 x i16> @llvm.bswap.v2i16(<2 x i16> [[S:%.*]])
-; CHECK-NEXT:    ret <2 x i16> [[T5]]
-;
-  %t2 = lshr <2 x i16> %s, <i16 undef, i16 8>
-  %t4 = shl <2 x i16> %s, <i16 8, i16 undef>
-  %t5 = or <2 x i16> %t2, %t4
-  ret <2 x i16> %t5
-}
-
-define i16 @test4(i16 %s) {
-; CHECK-LABEL: @test4(
-; CHECK-NEXT:    [[T5:%.*]] = call i16 @llvm.bswap.i16(i16 [[S:%.*]])
-; CHECK-NEXT:    ret i16 [[T5]]
-;
-  %t2 = lshr i16 %s, 8
-  %t4 = shl i16 %s, 8
-  %t5 = or i16 %t4, %t2
-  ret i16 %t5
-}
-
-define <2 x i16> @test4_vector(<2 x i16> %s) {
-; CHECK-LABEL: @test4_vector(
-; CHECK-NEXT:    [[T5:%.*]] = call <2 x i16> @llvm.bswap.v2i16(<2 x i16> [[S:%.*]])
-; CHECK-NEXT:    ret <2 x i16> [[T5]]
-;
-  %t2 = lshr <2 x i16> %s, <i16 8, i16 8>
-  %t4 = shl <2 x i16> %s, <i16 8, i16 8>
-  %t5 = or <2 x i16> %t4, %t2
-  ret <2 x i16> %t5
-}
-
-define i16 @test5(i16 %a) {
-; CHECK-LABEL: @test5(
-; CHECK-NEXT:    [[T_UPGRD_3:%.*]] = call i16 @llvm.bswap.i16(i16 [[A:%.*]])
-; CHECK-NEXT:    ret i16 [[T_UPGRD_3]]
-;
-  %t = zext i16 %a to i32
-  %t1 = and i32 %t, 65280
-  %t2 = ashr i32 %t1, 8
-  %t2.upgrd.1 = trunc i32 %t2 to i16
-  %t4 = and i32 %t, 255
-  %t5 = shl i32 %t4, 8
-  %t5.upgrd.2 = trunc i32 %t5 to i16
-  %t.upgrd.3 = or i16 %t2.upgrd.1, %t5.upgrd.2
-  %t6 = bitcast i16 %t.upgrd.3 to i16
-  %t6.upgrd.4 = zext i16 %t6 to i32
-  %retval = trunc i32 %t6.upgrd.4 to i16
-  ret i16 %retval
-}
-
-define <2 x i16> @test5_vector(<2 x i16> %a) {
-; CHECK-LABEL: @test5_vector(
-; CHECK-NEXT:    [[T_UPGRD_3:%.*]] = call <2 x i16> @llvm.bswap.v2i16(<2 x i16> [[A:%.*]])
-; CHECK-NEXT:    ret <2 x i16> [[T_UPGRD_3]]
-;
-  %t = zext <2 x i16> %a to <2 x i32>
-  %t1 = and <2 x i32> %t, <i32 65280, i32 65280>
-  %t2 = ashr <2 x i32> %t1, <i32 8, i32 8>
-  %t2.upgrd.1 = trunc <2 x i32> %t2 to <2 x i16>
-  %t4 = and <2 x i32> %t, <i32 255, i32 255>
-  %t5 = shl <2 x i32> %t4, <i32 8, i32 8>
-  %t5.upgrd.2 = trunc <2 x i32> %t5 to <2 x i16>
-  %t.upgrd.3 = or <2 x i16> %t2.upgrd.1, %t5.upgrd.2
-  %t6 = bitcast <2 x i16> %t.upgrd.3 to <2 x i16>
-  %t6.upgrd.4 = zext <2 x i16> %t6 to <2 x i32>
-  %retval = trunc <2 x i32> %t6.upgrd.4 to <2 x i16>
-  ret <2 x i16> %retval
-}
-
-; PR2842
-define i32 @test6(i32 %x) nounwind readnone {
-; CHECK-LABEL: @test6(
-; CHECK-NEXT:    [[T7:%.*]] = call i32 @llvm.bswap.i32(i32 [[X:%.*]])
-; CHECK-NEXT:    ret i32 [[T7]]
-;
-  %t = shl i32 %x, 16
-  %x.mask = and i32 %x, 65280
-  %t1 = lshr i32 %x, 16
-  %t2 = and i32 %t1, 255
-  %t3 = or i32 %x.mask, %t
-  %t4 = or i32 %t3, %t2
-  %t5 = shl i32 %t4, 8
-  %t6 = lshr i32 %x, 24
-  %t7 = or i32 %t5, %t6
-  ret i32 %t7
-}
-
-define <2 x i32> @test6_vector(<2 x i32> %x) nounwind readnone {
-; CHECK-LABEL: @test6_vector(
-; CHECK-NEXT:    [[T7:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[X:%.*]])
-; CHECK-NEXT:    ret <2 x i32> [[T7]]
-;
-  %t = shl <2 x i32> %x, <i32 16, i32 16>
-  %x.mask = and <2 x i32> %x, <i32 65280, i32 65280>
-  %t1 = lshr <2 x i32> %x, <i32 16, i32 16>
-  %t2 = and <2 x i32> %t1, <i32 255, i32 255>
-  %t3 = or <2 x i32> %x.mask, %t
-  %t4 = or <2 x i32> %t3, %t2
-  %t5 = shl <2 x i32> %t4, <i32 8, i32 8>
-  %t6 = lshr <2 x i32> %x, <i32 24, i32 24>
-  %t7 = or <2 x i32> %t5, %t6
-  ret <2 x i32> %t7
-}
-
-declare void @extra_use(i32)
-
-; swaphalf = (x << 16 | x >> 16)
-; ((swaphalf & 0x00ff00ff) << 8) | ((swaphalf >> 8) & 0x00ff00ff)
-
-define i32 @bswap32_and_first(i32 %x) {
-; CHECK-LABEL: @bswap32_and_first(
-; CHECK-NEXT:    [[BSWAP:%.*]] = call i32 @llvm.bswap.i32(i32 [[X:%.*]])
-; CHECK-NEXT:    ret i32 [[BSWAP]]
-;
-  %shl = shl i32 %x, 16
-  %shr = lshr i32 %x, 16
-  %swaphalf = or i32 %shl, %shr
-  %t = and i32 %swaphalf, 16711935
-  %tshl = shl nuw i32 %t, 8
-  %b = lshr i32 %swaphalf, 8
-  %band = and i32 %b, 16711935
-  %bswap = or i32 %tshl, %band
-  ret i32 %bswap
-}
-
-; Extra use should not prevent matching to bswap.
-; swaphalf = (x << 16 | x >> 16)
-; ((swaphalf & 0x00ff00ff) << 8) | ((swaphalf >> 8) & 0x00ff00ff)
-
-define i32 @bswap32_and_first_extra_use(i32 %x) {
-; CHECK-LABEL: @bswap32_and_first_extra_use(
-; CHECK-NEXT:    [[SWAPHALF:%.*]] = call i32 @llvm.fshl.i32(i32 [[X:%.*]], i32 [[X]], i32 16)
-; CHECK-NEXT:    [[T:%.*]] = and i32 [[SWAPHALF]], 16711935
-; CHECK-NEXT:    [[BSWAP:%.*]] = call i32 @llvm.bswap.i32(i32 [[X]])
-; CHECK-NEXT:    call void @extra_use(i32 [[T]])
-; CHECK-NEXT:    ret i32 [[BSWAP]]
-;
-  %shl = shl i32 %x, 16
-  %shr = lshr i32 %x, 16
-  %swaphalf = or i32 %shl, %shr
-  %t = and i32 %swaphalf, 16711935
-  %tshl = shl nuw i32 %t, 8
-  %b = lshr i32 %swaphalf, 8
-  %band = and i32 %b, 16711935
-  %bswap = or i32 %tshl, %band
-  call void @extra_use(i32 %t)
-  ret i32 %bswap
-}
-
-; swaphalf = (x << 16 | x >> 16)
-; ((swaphalf << 8) & 0xff00ff00) | ((swaphalf >> 8) & 0x00ff00ff)
-
-; PR23863
-define i32 @bswap32_shl_first(i32 %x) {
-; CHECK-LABEL: @bswap32_shl_first(
-; CHECK-NEXT:    [[BSWAP:%.*]] = call i32 @llvm.bswap.i32(i32 [[X:%.*]])
-; CHECK-NEXT:    ret i32 [[BSWAP]]
-;
-  %shl = shl i32 %x, 16
-  %shr = lshr i32 %x, 16
-  %swaphalf = or i32 %shl, %shr
-  %t = shl i32 %swaphalf, 8
-  %tand = and i32 %t, -16711936
-  %b = lshr i32 %swaphalf, 8
-  %band = and i32 %b, 16711935
-  %bswap = or i32 %tand, %band
-  ret i32 %bswap
-}
-
-; Extra use should not prevent matching to bswap.
-; swaphalf = (x << 16 | x >> 16)
-; ((swaphalf << 8) & 0xff00ff00) | ((swaphalf >> 8) & 0x00ff00ff)
-
-define i32 @bswap32_shl_first_extra_use(i32 %x) {
-; CHECK-LABEL: @bswap32_shl_first_extra_use(
-; CHECK-NEXT:    [[SWAPHALF:%.*]] = call i32 @llvm.fshl.i32(i32 [[X:%.*]], i32 [[X]], i32 16)
-; CHECK-NEXT:    [[T:%.*]] = shl i32 [[SWAPHALF]], 8
-; CHECK-NEXT:    [[BSWAP:%.*]] = call i32 @llvm.bswap.i32(i32 [[X]])
-; CHECK-NEXT:    call void @extra_use(i32 [[T]])
-; CHECK-NEXT:    ret i32 [[BSWAP]]
-;
-  %shl = shl i32 %x, 16
-  %shr = lshr i32 %x, 16
-  %swaphalf = or i32 %shl, %shr
-  %t = shl i32 %swaphalf, 8
-  %tand = and i32 %t, -16711936
-  %b = lshr i32 %swaphalf, 8
-  %band = and i32 %b, 16711935
-  %bswap = or i32 %tand, %band
-  call void @extra_use(i32 %t)
-  ret i32 %bswap
-}
-
-define i16 @test8(i16 %a) {
-; CHECK-LABEL: @test8(
-; CHECK-NEXT:    [[OR:%.*]] = call i16 @llvm.bswap.i16(i16 [[A:%.*]])
-; CHECK-NEXT:    ret i16 [[OR]]
-;
-  %conv = zext i16 %a to i32
-  %shr = lshr i16 %a, 8
-  %shl = shl i32 %conv, 8
-  %conv1 = zext i16 %shr to i32
-  %or = or i32 %conv1, %shl
-  %conv2 = trunc i32 %or to i16
-  ret i16 %conv2
-}
-
-define i16 @test9(i16 %a) {
-; CHECK-LABEL: @test9(
-; CHECK-NEXT:    [[OR:%.*]] = call i16 @llvm.bswap.i16(i16 [[A:%.*]])
-; CHECK-NEXT:    ret i16 [[OR]]
-;
-  %conv = zext i16 %a to i32
-  %shr = lshr i32 %conv, 8
-  %shl = shl i32 %conv, 8
-  %or = or i32 %shr, %shl
-  %conv2 = trunc i32 %or to i16
-  ret i16 %conv2
-}
-
-define i16 @test10(i32 %a) {
-; CHECK-LABEL: @test10(
-; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i32 [[A:%.*]] to i16
-; CHECK-NEXT:    [[REV:%.*]] = call i16 @llvm.bswap.i16(i16 [[TRUNC]])
-; CHECK-NEXT:    ret i16 [[REV]]
-;
-  %shr1 = lshr i32 %a, 8
-  %and1 = and i32 %shr1, 255
-  %and2 = shl i32 %a, 8
-  %shl1 = and i32 %and2, 65280
-  %or = or i32 %and1, %shl1
-  %conv = trunc i32 %or to i16
-  ret i16 %conv
-}
-
-define <2 x i16> @test10_vector(<2 x i32> %a) {
-; CHECK-LABEL: @test10_vector(
-; CHECK-NEXT:    [[TRUNC:%.*]] = trunc <2 x i32> [[A:%.*]] to <2 x i16>
-; CHECK-NEXT:    [[REV:%.*]] = call <2 x i16> @llvm.bswap.v2i16(<2 x i16> [[TRUNC]])
-; CHECK-NEXT:    ret <2 x i16> [[REV]]
-;
-  %shr1 = lshr <2 x i32> %a, <i32 8, i32 8>
-  %and1 = and <2 x i32> %shr1, <i32 255, i32 255>
-  %and2 = shl <2 x i32> %a, <i32 8, i32 8>
-  %shl1 = and <2 x i32> %and2, <i32 65280, i32 65280>
-  %or = or <2 x i32> %and1, %shl1
-  %conv = trunc <2 x i32> %or to <2 x i16>
-  ret <2 x i16> %conv
-}
-
-define i64 @PR39793_bswap_u64_as_u32(i64 %0) {
-; CHECK-LABEL: @PR39793_bswap_u64_as_u32(
-; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i64 [[TMP0:%.*]] to i32
-; CHECK-NEXT:    [[REV:%.*]] = call i32 @llvm.bswap.i32(i32 [[TRUNC]])
-; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[REV]] to i64
-; CHECK-NEXT:    ret i64 [[TMP2]]
-;
-  %2 = lshr i64 %0, 24
-  %3 = and i64 %2, 255
-  %4 = lshr i64 %0, 8
-  %5 = and i64 %4, 65280
-  %6 = or i64 %3, %5
-  %7 = shl i64 %0, 8
-  %8 = and i64 %7, 16711680
-  %9 = or i64 %6, %8
-  %10 = shl i64 %0, 24
-  %11 = and i64 %10, 4278190080
-  %12 = or i64 %9, %11
-  ret i64 %12
-}
-
-define i16 @PR39793_bswap_u64_as_u32_trunc(i64 %0) {
-; CHECK-LABEL: @PR39793_bswap_u64_as_u32_trunc(
-; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i64 [[TMP0:%.*]] to i32
-; CHECK-NEXT:    [[REV:%.*]] = call i32 @llvm.bswap.i32(i32 [[TRUNC]])
-; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[REV]] to i16
-; CHECK-NEXT:    ret i16 [[TMP2]]
-;
-  %2 = lshr i64 %0, 24
-  %3 = and i64 %2, 255
-  %4 = lshr i64 %0, 8
-  %5 = and i64 %4, 65280
-  %6 = or i64 %3, %5
-  %7 = shl i64 %0, 8
-  %8 = and i64 %7, 16711680
-  %9 = or i64 %6, %8
-  %10 = shl i64 %0, 24
-  %11 = and i64 %10, 4278190080
-  %12 = or i64 %9, %11
-  %13 = trunc i64 %12 to i16
-  ret i16 %13
-}
-
-define i64 @PR39793_bswap_u64_as_u16(i64 %0) {
-; CHECK-LABEL: @PR39793_bswap_u64_as_u16(
-; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i64 [[TMP0:%.*]] to i16
-; CHECK-NEXT:    [[REV:%.*]] = call i16 @llvm.bswap.i16(i16 [[TRUNC]])
-; CHECK-NEXT:    [[TMP2:%.*]] = zext i16 [[REV]] to i64
-; CHECK-NEXT:    ret i64 [[TMP2]]
-;
-  %2 = lshr i64 %0, 8
-  %3 = and i64 %2, 255
-  %4 = shl i64 %0, 8
-  %5 = and i64 %4, 65280
-  %6 = or i64 %3, %5
-  ret i64 %6
-}
-
-define <2 x i64> @PR39793_bswap_u64_as_u16_vector(<2 x i64> %0) {
-; CHECK-LABEL: @PR39793_bswap_u64_as_u16_vector(
-; CHECK-NEXT:    [[TRUNC:%.*]] = trunc <2 x i64> [[TMP0:%.*]] to <2 x i16>
-; CHECK-NEXT:    [[REV:%.*]] = call <2 x i16> @llvm.bswap.v2i16(<2 x i16> [[TRUNC]])
-; CHECK-NEXT:    [[TMP2:%.*]] = zext <2 x i16> [[REV]] to <2 x i64>
-; CHECK-NEXT:    ret <2 x i64> [[TMP2]]
-;
-  %2 = lshr <2 x i64> %0, <i64 8, i64 8>
-  %3 = and <2 x i64> %2, <i64 255, i64 255>
-  %4 = shl <2 x i64> %0, <i64 8, i64 8>
-  %5 = and <2 x i64> %4, <i64 65280, i64 65280>
-  %6 = or <2 x i64> %3, %5
-  ret <2 x i64> %6
-}
-
-define i8 @PR39793_bswap_u64_as_u16_trunc(i64 %0) {
-; CHECK-LABEL: @PR39793_bswap_u64_as_u16_trunc(
-; CHECK-NEXT:    [[REV1:%.*]] = lshr i64 [[TMP0:%.*]], 8
-; CHECK-NEXT:    [[TMP2:%.*]] = trunc i64 [[REV1]] to i8
-; CHECK-NEXT:    ret i8 [[TMP2]]
-;
-  %2 = lshr i64 %0, 8
-  %3 = and i64 %2, 255
-  %4 = shl i64 %0, 8
-  %5 = and i64 %4, 65280
-  %6 = or i64 %3, %5
-  %7 = trunc i64 %6 to i8
-  ret i8 %7
-}
-
-define i50 @PR39793_bswap_u50_as_u16(i50 %0) {
-; CHECK-LABEL: @PR39793_bswap_u50_as_u16(
-; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i50 [[TMP0:%.*]] to i16
-; CHECK-NEXT:    [[REV:%.*]] = call i16 @llvm.bswap.i16(i16 [[TRUNC]])
-; CHECK-NEXT:    [[TMP2:%.*]] = zext i16 [[REV]] to i50
-; CHECK-NEXT:    ret i50 [[TMP2]]
-;
-  %2 = lshr i50 %0, 8
-  %3 = and i50 %2, 255
-  %4 = shl i50 %0, 8
-  %5 = and i50 %4, 65280
-  %6 = or i50 %3, %5
-  ret i50 %6
-}
-
-define i32 @PR39793_bswap_u32_as_u16(i32 %0) {
-; CHECK-LABEL: @PR39793_bswap_u32_as_u16(
-; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i32 [[TMP0:%.*]] to i16
-; CHECK-NEXT:    [[REV:%.*]] = call i16 @llvm.bswap.i16(i16 [[TRUNC]])
-; CHECK-NEXT:    [[TMP2:%.*]] = zext i16 [[REV]] to i32
-; CHECK-NEXT:    ret i32 [[TMP2]]
-;
-  %2 = lshr i32 %0, 8
-  %3 = and i32 %2, 255
-  %4 = shl i32 %0, 8
-  %5 = and i32 %4, 65280
-  %6 = or i32 %3, %5
-  ret i32 %6
-}
-
-define i8 @PR39793_bswap_u32_as_u16_trunc(i32 %0) {
-; CHECK-LABEL: @PR39793_bswap_u32_as_u16_trunc(
-; CHECK-NEXT:    [[REV1:%.*]] = lshr i32 [[TMP0:%.*]], 8
-; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[REV1]] to i8
-; CHECK-NEXT:    ret i8 [[TMP2]]
-;
-  %2 = lshr i32 %0, 8
-  %3 = and i32 %2, 255
-  %4 = shl i32 %0, 8
-  %5 = and i32 %4, 65280
-  %6 = or i32 %3, %5
-  %7 = trunc i32 %6 to i8
-  ret i8 %7
-}
-
-define i32 @partial_bswap(i32 %x) {
-; CHECK-LABEL: @partial_bswap(
-; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.bswap.i32(i32 [[X:%.*]])
-; CHECK-NEXT:    ret i32 [[TMP1]]
-;
-  %x3 = shl i32 %x, 24
-  %a2 = shl i32 %x, 8
-  %x2 = and i32 %a2, 16711680
-  %x32 = or i32 %x3, %x2
-  %t1 = and i32 %x, -65536
-  %t2 = call i32 @llvm.bswap.i32(i32 %t1)
-  %r = or i32 %x32, %t2
-  ret i32 %r
-}
-declare i32 @llvm.bswap.i32(i32)
-
-define <2 x i32> @partial_bswap_vector(<2 x i32> %x) {
-; CHECK-LABEL: @partial_bswap_vector(
-; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[X:%.*]])
-; CHECK-NEXT:    ret <2 x i32> [[TMP1]]
-;
-  %x3 = shl <2 x i32> %x, <i32 24, i32 24>
-  %a2 = shl <2 x i32> %x, <i32 8, i32 8>
-  %x2 = and <2 x i32> %a2, <i32 16711680, i32 16711680>
-  %x32 = or <2 x i32> %x3, %x2
-  %t1 = and <2 x i32> %x, <i32 -65536, i32 -65536>
-  %t2 = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %t1)
-  %r = or <2 x i32> %x32, %t2
-  ret <2 x i32> %r
-}
-declare <2 x i32> @llvm.bswap.v2i32(<2 x i32>)
-
-define i16 @partial_bitreverse(i16 %x) {
-; CHECK-LABEL: @partial_bitreverse(
-; CHECK-NEXT:    [[OR:%.*]] = call i16 @llvm.bswap.i16(i16 [[X:%.*]])
-; CHECK-NEXT:    ret i16 [[OR]]
-;
-  %rev= call i16 @llvm.bitreverse.i16(i16 %x)
-  %lo = and i16 %rev, 255
-  %hi = and i16 %rev, -256
-  %revlo = call i16 @llvm.bitreverse.i16(i16 %lo)
-  %revhi = call i16 @llvm.bitreverse.i16(i16 %hi)
-  %newlo = lshr i16 %revlo, 8
-  %newhi = shl  i16 %revhi, 8
-  %or = or i16 %newlo, %newhi
-  ret i16 %or
-}
-declare i16 @llvm.bitreverse.i16(i16)
-
-define i64 @bswap_and_mask_0(i64 %0) {
-; CHECK-LABEL: @bswap_and_mask_0(
-; CHECK-NEXT:    [[TMP2:%.*]] = and i64 [[TMP0:%.*]], -72057594037927681
-; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP2]])
-; CHECK-NEXT:    ret i64 [[TMP3]]
-;
-  %2 = lshr i64 %0, 56
-  %3 = shl i64 %0, 56
-  %4 = or i64 %2, %3
-  ret i64 %4
-}
-
-define i64 @bswap_and_mask_1(i64 %0) {
-; CHECK-LABEL: @bswap_and_mask_1(
-; CHECK-NEXT:    [[TMP2:%.*]] = lshr i64 [[TMP0:%.*]], 56
-; CHECK-NEXT:    [[TMP3:%.*]] = lshr i64 [[TMP0]], 40
-; CHECK-NEXT:    [[TMP4:%.*]] = and i64 [[TMP3]], 65280
-; CHECK-NEXT:    [[TMP5:%.*]] = or i64 [[TMP4]], [[TMP2]]
-; CHECK-NEXT:    ret i64 [[TMP5]]
-;
-  %2 = lshr i64 %0, 56
-  %3 = lshr i64 %0, 40
-  %4 = and i64 %3, 65280
-  %5 = or i64 %4, %2
-  ret i64 %5
-}
-
-define i64 @bswap_and_mask_2(i64 %0) {
-; CHECK-LABEL: @bswap_and_mask_2(
-; CHECK-NEXT:    [[TMP2:%.*]] = and i64 [[TMP0:%.*]], -72057594037862401
-; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP2]])
-; CHECK-NEXT:    ret i64 [[TMP3]]
-;
-  %2 = lshr i64 %0, 56
-  %3 = shl i64 %0, 56
-  %4 = or i64 %2, %3
-  %5 = shl i64 %0, 40
-  %6 = and i64 %5, 71776119061217280
-  %7 = or i64 %4, %6
-  ret i64 %7
-}
-
-define i64 @bswap_trunc(i64 %x01234567) {
-; CHECK-LABEL: @bswap_trunc(
-; CHECK-NEXT:    [[X7ZZZZZZZ:%.*]] = shl i64 [[X01234567:%.*]], 56
-; CHECK-NEXT:    [[XZ0123456:%.*]] = lshr i64 [[X01234567]], 8
-; CHECK-NEXT:    [[XZZZZZ012:%.*]] = lshr i64 [[X01234567]], 40
-; CHECK-NEXT:    [[X3456:%.*]] = trunc i64 [[XZ0123456]] to i32
-; CHECK-NEXT:    [[XZ012:%.*]] = trunc i64 [[XZZZZZ012]] to i32
-; CHECK-NEXT:    [[X6543:%.*]] = call i32 @llvm.bswap.i32(i32 [[X3456]])
-; CHECK-NEXT:    [[X210Z:%.*]] = call i32 @llvm.bswap.i32(i32 [[XZ012]])
-; CHECK-NEXT:    [[XZ210:%.*]] = lshr exact i32 [[X210Z]], 8
-; CHECK-NEXT:    [[XZZZZ6543:%.*]] = zext i32 [[X6543]] to i64
-; CHECK-NEXT:    [[XZZZZZ210:%.*]] = zext i32 [[XZ210]] to i64
-; CHECK-NEXT:    [[XZ6543ZZZ:%.*]] = shl nuw nsw i64 [[XZZZZ6543]], 24
-; CHECK-NEXT:    [[XZ6543210:%.*]] = or i64 [[XZ6543ZZZ]], [[XZZZZZ210]]
-; CHECK-NEXT:    [[X76543210:%.*]] = or i64 [[XZ6543210]], [[X7ZZZZZZZ]]
-; CHECK-NEXT:    ret i64 [[X76543210]]
-;
-  %x7zzzzzzz = shl i64 %x01234567, 56
-  %xz0123456 = lshr i64 %x01234567, 8
-  %xzzzzz012 = lshr i64 %x01234567, 40
-  %x3456 = trunc i64 %xz0123456 to i32
-  %xz012 = trunc i64 %xzzzzz012 to i32
-  %x6543 = call i32 @llvm.bswap.i32(i32 %x3456)
-  %x210z = call i32 @llvm.bswap.i32(i32 %xz012)
-  %xz210 = lshr i32 %x210z, 8
-  %xzzzz6543 = zext i32 %x6543 to i64
-  %xzzzzz210 = zext i32 %xz210 to i64
-  %xz6543zzz = shl i64 %xzzzz6543, 24
-  %xz6543210 = or i64 %xzzzzz210, %xz6543zzz
-  %x76543210 = or i64 %xz6543210, %x7zzzzzzz
-  ret i64 %x76543210
-}
+; A copy of bswap.ll, with undef at insertelement/shufflevector replaced with
+; poison
 
 define i32 @shuf_4bytes(<4 x i8> %x) {
 ; CHECK-LABEL: @shuf_4bytes(
@@ -706,162 +85,3 @@ define i32 @shuf_2bytes_widening(<2 x i8> %x) {
 
 declare i32 @llvm.fshl.i32(i32, i32, i32)
 declare i32 @llvm.fshr.i32(i32, i32, i32)
-
-define i32 @funnel_unary(i32 %abcd) {
-; CHECK-LABEL: @funnel_unary(
-; CHECK-NEXT:    [[DCBA:%.*]] = call i32 @llvm.bswap.i32(i32 [[ABCD:%.*]])
-; CHECK-NEXT:    ret i32 [[DCBA]]
-;
-  %dabc = call i32 @llvm.fshl.i32(i32 %abcd, i32 %abcd, i32 24)
-  %bcda = call i32 @llvm.fshr.i32(i32 %abcd, i32 %abcd, i32 24)
-  %dzbz = and i32 %dabc, -16711936
-  %zcza = and i32 %bcda,  16711935
-  %dcba = or i32 %dzbz, %zcza
-  ret i32 %dcba
-}
-
-define i32 @funnel_binary(i32 %abcd) {
-; CHECK-LABEL: @funnel_binary(
-; CHECK-NEXT:    [[DCBA:%.*]] = call i32 @llvm.bswap.i32(i32 [[ABCD:%.*]])
-; CHECK-NEXT:    ret i32 [[DCBA]]
-;
-  %cdzz = shl i32 %abcd, 16
-  %dcdz = call i32 @llvm.fshl.i32(i32 %abcd, i32 %cdzz, i32 24)
-  %zzab = lshr i32 %abcd, 16
-  %zaba = call i32 @llvm.fshr.i32(i32 %zzab, i32 %abcd, i32 24)
-  %dczz = and i32 %dcdz, -65536
-  %zzba = and i32 %zaba,  65535
-  %dcba = or i32 %dczz, %zzba
-  ret i32 %dcba
-}
-
-define i32 @funnel_and(i32 %abcd) {
-; CHECK-LABEL: @funnel_and(
-; CHECK-NEXT:    [[DCBA:%.*]] = call i32 @llvm.bswap.i32(i32 [[ABCD:%.*]])
-; CHECK-NEXT:    ret i32 [[DCBA]]
-;
-  %zzcz = and i32 %abcd, 65280
-  %zcza = call i32 @llvm.fshl.i32(i32 %zzcz, i32 %abcd, i32 8)
-  %zbzz = and i32 %abcd, 16711680
-  %dzbz = call i32 @llvm.fshl.i32(i32 %abcd, i32 %zbzz, i32 24)
-  %dcba = or i32 %zcza, %dzbz
-  ret i32 %dcba
-}
-
-; PR47191 - deep IR trees prevent ADD/XOR instructions being simplified to OR.
-
-define i64 @PR47191_problem1(i64 %0) {
-; CHECK-LABEL: @PR47191_problem1(
-; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP0:%.*]])
-; CHECK-NEXT:    ret i64 [[TMP2]]
-;
-  %2 = lshr i64 %0, 56
-  %3 = lshr i64 %0, 40
-  %4 = and i64 %3, 65280
-  %5 = lshr i64 %0, 24
-  %6 = and i64 %5, 16711680
-  %7 = lshr i64 %0, 8
-  %8 = and i64 %7, 4278190080
-  %9 = shl i64 %0, 56
-  %10 = shl i64 %0, 40
-  %11 = and i64 %10, 71776119061217280
-  %12 = shl i64 %0, 24
-  %13 = and i64 %12, 280375465082880
-  %14 = or i64 %9, %2
-  %15 = or i64 %14, %4
-  %16 = or i64 %15, %6
-  %17 = or i64 %16, %8
-  %18 = or i64 %17, %11
-  %19 = or i64 %18, %13
-  %20 = shl i64 %0, 8
-  %21 = and i64 %20, 1095216660480
-  %22 = add i64 %19, %21
-  ret i64 %22
-}
-
-define i64 @PR47191_problem2(i64 %0) {
-; CHECK-LABEL: @PR47191_problem2(
-; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP0:%.*]])
-; CHECK-NEXT:    ret i64 [[TMP2]]
-;
-  %2 = lshr i64 %0, 56
-  %3 = lshr i64 %0, 40
-  %4 = and i64 %3, 65280
-  %5 = lshr i64 %0, 24
-  %6 = and i64 %5, 16711680
-  %7 = lshr i64 %0, 8
-  %8 = and i64 %7, 4278190080
-  %9 = shl i64 %0, 56
-  %10 = shl i64 %0, 40
-  %11 = and i64 %10, 71776119061217280
-  %12 = or i64 %9, %2
-  %13 = or i64 %12, %4
-  %14 = or i64 %13, %6
-  %15 = or i64 %14, %8
-  %16 = or i64 %15, %11
-  %17 = shl i64 %0, 24
-  %18 = and i64 %17, 280375465082880
-  %19 = shl i64 %0, 8
-  %20 = and i64 %19, 1095216660480
-  %21 = or i64 %20, %18
-  %22 = xor i64 %21, %16
-  ret i64 %22
-}
-
-define i64 @PR47191_problem3(i64 %0) {
-; CHECK-LABEL: @PR47191_problem3(
-; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP0:%.*]])
-; CHECK-NEXT:    ret i64 [[TMP2]]
-;
-  %2 = lshr i64 %0, 56
-  %3 = lshr i64 %0, 40
-  %4 = and i64 %3, 65280
-  %5 = lshr i64 %0, 24
-  %6 = and i64 %5, 16711680
-  %7 = lshr i64 %0, 8
-  %8 = and i64 %7, 4278190080
-  %9 = shl i64 %0, 56
-  %10 = shl i64 %0, 40
-  %11 = and i64 %10, 71776119061217280
-  %12 = or i64 %9, %2
-  %13 = or i64 %12, %4
-  %14 = or i64 %13, %6
-  %15 = or i64 %14, %8
-  %16 = or i64 %15, %11
-  %17 = shl i64 %0, 24
-  %18 = and i64 %17, 280375465082880
-  %19 = shl i64 %0, 8
-  %20 = and i64 %19, 1095216660480
-  %21 = or i64 %20, %18
-  %22 = xor i64 %21, %16
-  ret i64 %22
-}
-
-define i64 @PR47191_problem4(i64 %0) {
-; CHECK-LABEL: @PR47191_problem4(
-; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP0:%.*]])
-; CHECK-NEXT:    ret i64 [[TMP2]]
-;
-  %2 = lshr i64 %0, 56
-  %3 = shl i64 %0, 56
-  %4 = or i64 %2, %3
-  %5 = lshr i64 %0, 40
-  %6 = and i64 %5, 65280
-  %7 = or i64 %4, %6
-  %8 = shl i64 %0, 40
-  %9 = and i64 %8, 71776119061217280
-  %10 = or i64 %7, %9
-  %11 = lshr i64 %0, 24
-  %12 = and i64 %11, 16711680
-  %13 = or i64 %10, %12
-  %14 = shl i64 %0, 24
-  %15 = and i64 %14, 280375465082880
-  %16 = or i64 %13, %15
-  %17 = lshr i64 %0, 8
-  %18 = and i64 %17, 4278190080
-  %19 = or i64 %16, %18
-  %20 = shl i64 %0, 8
-  %21 = and i64 %20, 1095216660480
-  %22 = add i64 %19, %21
-  ret i64 %22
-}

diff  --git a/llvm/test/Transforms/InstCombine/fmul-inseltpoison.ll b/llvm/test/Transforms/InstCombine/fmul-inseltpoison.ll
index 083b7a6ad6ad8..41d486856b6a5 100644
--- a/llvm/test/Transforms/InstCombine/fmul-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/fmul-inseltpoison.ll
@@ -1,375 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S -instcombine < %s | FileCheck %s
 
-; (-0.0 - X) * C => X * -C
-define float @neg_constant(float %x) {
-; CHECK-LABEL: @neg_constant(
-; CHECK-NEXT:    [[MUL:%.*]] = fmul ninf float [[X:%.*]], -2.000000e+01
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %sub = fsub float -0.0, %x
-  %mul = fmul ninf float %sub, 2.0e+1
-  ret float %mul
-}
-
-define float @unary_neg_constant(float %x) {
-; CHECK-LABEL: @unary_neg_constant(
-; CHECK-NEXT:    [[MUL:%.*]] = fmul ninf float [[X:%.*]], -2.000000e+01
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %sub = fneg float %x
-  %mul = fmul ninf float %sub, 2.0e+1
-  ret float %mul
-}
-
-define <2 x float> @neg_constant_vec(<2 x float> %x) {
-; CHECK-LABEL: @neg_constant_vec(
-; CHECK-NEXT:    [[MUL:%.*]] = fmul ninf <2 x float> [[X:%.*]], <float -2.000000e+00, float -3.000000e+00>
-; CHECK-NEXT:    ret <2 x float> [[MUL]]
-;
-  %sub = fsub <2 x float> <float -0.0, float -0.0>, %x
-  %mul = fmul ninf <2 x float> %sub, <float 2.0, float 3.0>
-  ret <2 x float> %mul
-}
-
-define <2 x float> @unary_neg_constant_vec(<2 x float> %x) {
-; CHECK-LABEL: @unary_neg_constant_vec(
-; CHECK-NEXT:    [[MUL:%.*]] = fmul ninf <2 x float> [[X:%.*]], <float -2.000000e+00, float -3.000000e+00>
-; CHECK-NEXT:    ret <2 x float> [[MUL]]
-;
-  %sub = fneg <2 x float> %x
-  %mul = fmul ninf <2 x float> %sub, <float 2.0, float 3.0>
-  ret <2 x float> %mul
-}
-
-define <2 x float> @neg_constant_vec_undef(<2 x float> %x) {
-; CHECK-LABEL: @neg_constant_vec_undef(
-; CHECK-NEXT:    [[MUL:%.*]] = fmul ninf <2 x float> [[X:%.*]], <float -2.000000e+00, float -3.000000e+00>
-; CHECK-NEXT:    ret <2 x float> [[MUL]]
-;
-  %sub = fsub <2 x float> <float undef, float -0.0>, %x
-  %mul = fmul ninf <2 x float> %sub, <float 2.0, float 3.0>
-  ret <2 x float> %mul
-}
-
-; (0.0 - X) * C => X * -C
-define float @neg_nsz_constant(float %x) {
-; CHECK-LABEL: @neg_nsz_constant(
-; CHECK-NEXT:    [[MUL:%.*]] = fmul nnan float [[X:%.*]], -2.000000e+01
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %sub = fsub nsz float 0.0, %x
-  %mul = fmul nnan float %sub, 2.0e+1
-  ret float %mul
-}
-
-define float @unary_neg_nsz_constant(float %x) {
-; CHECK-LABEL: @unary_neg_nsz_constant(
-; CHECK-NEXT:    [[MUL:%.*]] = fmul nnan float [[X:%.*]], -2.000000e+01
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %sub = fneg nsz float %x
-  %mul = fmul nnan float %sub, 2.0e+1
-  ret float %mul
-}
-
-; (-0.0 - X) * (-0.0 - Y) => X * Y
-define float @neg_neg(float %x, float %y) {
-; CHECK-LABEL: @neg_neg(
-; CHECK-NEXT:    [[MUL:%.*]] = fmul arcp float [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %sub1 = fsub float -0.0, %x
-  %sub2 = fsub float -0.0, %y
-  %mul = fmul arcp float %sub1, %sub2
-  ret float %mul
-}
-
-define float @unary_neg_unary_neg(float %x, float %y) {
-; CHECK-LABEL: @unary_neg_unary_neg(
-; CHECK-NEXT:    [[MUL:%.*]] = fmul arcp float [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %sub1 = fneg float %x
-  %sub2 = fneg float %y
-  %mul = fmul arcp float %sub1, %sub2
-  ret float %mul
-}
-
-define float @unary_neg_neg(float %x, float %y) {
-; CHECK-LABEL: @unary_neg_neg(
-; CHECK-NEXT:    [[MUL:%.*]] = fmul arcp float [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %sub1 = fneg float %x
-  %sub2 = fsub float -0.0, %y
-  %mul = fmul arcp float %sub1, %sub2
-  ret float %mul
-}
-
-define float @neg_unary_neg(float %x, float %y) {
-; CHECK-LABEL: @neg_unary_neg(
-; CHECK-NEXT:    [[MUL:%.*]] = fmul arcp float [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %sub1 = fsub float -0.0, %x
-  %sub2 = fneg float %y
-  %mul = fmul arcp float %sub1, %sub2
-  ret float %mul
-}
-
-define <2 x float> @neg_neg_vec(<2 x float> %x, <2 x float> %y) {
-; CHECK-LABEL: @neg_neg_vec(
-; CHECK-NEXT:    [[MUL:%.*]] = fmul arcp <2 x float> [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    ret <2 x float> [[MUL]]
-;
-  %sub1 = fsub <2 x float> <float -0.0, float -0.0>, %x
-  %sub2 = fsub <2 x float> <float -0.0, float -0.0>, %y
-  %mul = fmul arcp <2 x float> %sub1, %sub2
-  ret <2 x float> %mul
-}
-
-define <2 x float> @unary_neg_unary_neg_vec(<2 x float> %x, <2 x float> %y) {
-; CHECK-LABEL: @unary_neg_unary_neg_vec(
-; CHECK-NEXT:    [[MUL:%.*]] = fmul arcp <2 x float> [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    ret <2 x float> [[MUL]]
-;
-  %sub1 = fneg <2 x float> %x
-  %sub2 = fneg <2 x float> %y
-  %mul = fmul arcp <2 x float> %sub1, %sub2
-  ret <2 x float> %mul
-}
-
-define <2 x float> @unary_neg_neg_vec(<2 x float> %x, <2 x float> %y) {
-; CHECK-LABEL: @unary_neg_neg_vec(
-; CHECK-NEXT:    [[MUL:%.*]] = fmul arcp <2 x float> [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    ret <2 x float> [[MUL]]
-;
-  %sub1 = fneg <2 x float> %x
-  %sub2 = fsub <2 x float> <float -0.0, float -0.0>, %y
-  %mul = fmul arcp <2 x float> %sub1, %sub2
-  ret <2 x float> %mul
-}
-
-define <2 x float> @neg_unary_neg_vec(<2 x float> %x, <2 x float> %y) {
-; CHECK-LABEL: @neg_unary_neg_vec(
-; CHECK-NEXT:    [[MUL:%.*]] = fmul arcp <2 x float> [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    ret <2 x float> [[MUL]]
-;
-  %sub1 = fsub <2 x float> <float -0.0, float -0.0>, %x
-  %sub2 = fneg <2 x float> %y
-  %mul = fmul arcp <2 x float> %sub1, %sub2
-  ret <2 x float> %mul
-}
-
-define <2 x float> @neg_neg_vec_undef(<2 x float> %x, <2 x float> %y) {
-; CHECK-LABEL: @neg_neg_vec_undef(
-; CHECK-NEXT:    [[MUL:%.*]] = fmul arcp <2 x float> [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    ret <2 x float> [[MUL]]
-;
-  %sub1 = fsub <2 x float> <float -0.0, float undef>, %x
-  %sub2 = fsub <2 x float> <float undef, float -0.0>, %y
-  %mul = fmul arcp <2 x float> %sub1, %sub2
-  ret <2 x float> %mul
-}
-
-define <2 x float> @unary_neg_neg_vec_undef(<2 x float> %x, <2 x float> %y) {
-; CHECK-LABEL: @unary_neg_neg_vec_undef(
-; CHECK-NEXT:    [[MUL:%.*]] = fmul arcp <2 x float> [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    ret <2 x float> [[MUL]]
-;
-  %neg = fneg <2 x float> %x
-  %sub = fsub <2 x float> <float undef, float -0.0>, %y
-  %mul = fmul arcp <2 x float> %neg, %sub
-  ret <2 x float> %mul
-}
-
-define <2 x float> @neg_unary_neg_vec_undef(<2 x float> %x, <2 x float> %y) {
-; CHECK-LABEL: @neg_unary_neg_vec_undef(
-; CHECK-NEXT:    [[MUL:%.*]] = fmul arcp <2 x float> [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    ret <2 x float> [[MUL]]
-;
-  %sub = fsub <2 x float> <float -0.0, float undef>, %x
-  %neg = fneg <2 x float> %y
-  %mul = fmul arcp <2 x float> %sub, %neg
-  ret <2 x float> %mul
-}
-
-; (0.0 - X) * (0.0 - Y) => X * Y
-define float @neg_neg_nsz(float %x, float %y) {
-; CHECK-LABEL: @neg_neg_nsz(
-; CHECK-NEXT:    [[MUL:%.*]] = fmul afn float [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %sub1 = fsub nsz float 0.0, %x
-  %sub2 = fsub nsz float 0.0, %y
-  %mul = fmul afn float %sub1, %sub2
-  ret float %mul
-}
-
-declare void @use_f32(float)
-
-define float @neg_neg_multi_use(float %x, float %y) {
-; CHECK-LABEL: @neg_neg_multi_use(
-; CHECK-NEXT:    [[NX:%.*]] = fneg float [[X:%.*]]
-; CHECK-NEXT:    [[NY:%.*]] = fneg float [[Y:%.*]]
-; CHECK-NEXT:    [[MUL:%.*]] = fmul afn float [[X]], [[Y]]
-; CHECK-NEXT:    call void @use_f32(float [[NX]])
-; CHECK-NEXT:    call void @use_f32(float [[NY]])
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %nx = fsub float -0.0, %x
-  %ny = fsub float -0.0, %y
-  %mul = fmul afn float %nx, %ny
-  call void @use_f32(float %nx)
-  call void @use_f32(float %ny)
-  ret float %mul
-}
-
-define float @unary_neg_unary_neg_multi_use(float %x, float %y) {
-; CHECK-LABEL: @unary_neg_unary_neg_multi_use(
-; CHECK-NEXT:    [[NX:%.*]] = fneg float [[X:%.*]]
-; CHECK-NEXT:    [[NY:%.*]] = fneg float [[Y:%.*]]
-; CHECK-NEXT:    [[MUL:%.*]] = fmul afn float [[X]], [[Y]]
-; CHECK-NEXT:    call void @use_f32(float [[NX]])
-; CHECK-NEXT:    call void @use_f32(float [[NY]])
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %nx = fneg float %x
-  %ny = fneg float %y
-  %mul = fmul afn float %nx, %ny
-  call void @use_f32(float %nx)
-  call void @use_f32(float %ny)
-  ret float %mul
-}
-
-define float @unary_neg_neg_multi_use(float %x, float %y) {
-; CHECK-LABEL: @unary_neg_neg_multi_use(
-; CHECK-NEXT:    [[NX:%.*]] = fneg float [[X:%.*]]
-; CHECK-NEXT:    [[NY:%.*]] = fneg float [[Y:%.*]]
-; CHECK-NEXT:    [[MUL:%.*]] = fmul afn float [[X]], [[Y]]
-; CHECK-NEXT:    call void @use_f32(float [[NX]])
-; CHECK-NEXT:    call void @use_f32(float [[NY]])
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %nx = fneg float %x
-  %ny = fsub float -0.0, %y
-  %mul = fmul afn float %nx, %ny
-  call void @use_f32(float %nx)
-  call void @use_f32(float %ny)
-  ret float %mul
-}
-
-define float @neg_unary_neg_multi_use(float %x, float %y) {
-; CHECK-LABEL: @neg_unary_neg_multi_use(
-; CHECK-NEXT:    [[NX:%.*]] = fneg float [[X:%.*]]
-; CHECK-NEXT:    [[NY:%.*]] = fneg float [[Y:%.*]]
-; CHECK-NEXT:    [[MUL:%.*]] = fmul afn float [[X]], [[Y]]
-; CHECK-NEXT:    call void @use_f32(float [[NX]])
-; CHECK-NEXT:    call void @use_f32(float [[NY]])
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %nx = fsub float -0.0, %x
-  %ny = fneg float %y
-  %mul = fmul afn float %nx, %ny
-  call void @use_f32(float %nx)
-  call void @use_f32(float %ny)
-  ret float %mul
-}
-
-; (-0.0 - X) * Y
-define float @neg_mul(float %x, float %y) {
-; CHECK-LABEL: @neg_mul(
-; CHECK-NEXT:    [[SUB:%.*]] = fneg float [[X:%.*]]
-; CHECK-NEXT:    [[MUL:%.*]] = fmul float [[SUB]], [[Y:%.*]]
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %sub = fsub float -0.0, %x
-  %mul = fmul float %sub, %y
-  ret float %mul
-}
-
-define float @unary_neg_mul(float %x, float %y) {
-; CHECK-LABEL: @unary_neg_mul(
-; CHECK-NEXT:    [[NEG:%.*]] = fneg float [[X:%.*]]
-; CHECK-NEXT:    [[MUL:%.*]] = fmul float [[NEG]], [[Y:%.*]]
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %neg = fneg float %x
-  %mul = fmul float %neg, %y
-  ret float %mul
-}
-
-define <2 x float> @neg_mul_vec(<2 x float> %x, <2 x float> %y) {
-; CHECK-LABEL: @neg_mul_vec(
-; CHECK-NEXT:    [[SUB:%.*]] = fneg <2 x float> [[X:%.*]]
-; CHECK-NEXT:    [[MUL:%.*]] = fmul <2 x float> [[SUB]], [[Y:%.*]]
-; CHECK-NEXT:    ret <2 x float> [[MUL]]
-;
-  %sub = fsub <2 x float> <float -0.0, float -0.0>, %x
-  %mul = fmul <2 x float> %sub, %y
-  ret <2 x float> %mul
-}
-
-define <2 x float> @unary_neg_mul_vec(<2 x float> %x, <2 x float> %y) {
-; CHECK-LABEL: @unary_neg_mul_vec(
-; CHECK-NEXT:    [[SUB:%.*]] = fneg <2 x float> [[X:%.*]]
-; CHECK-NEXT:    [[MUL:%.*]] = fmul <2 x float> [[SUB]], [[Y:%.*]]
-; CHECK-NEXT:    ret <2 x float> [[MUL]]
-;
-  %sub = fneg <2 x float> %x
-  %mul = fmul <2 x float> %sub, %y
-  ret <2 x float> %mul
-}
-
-define <2 x float> @neg_mul_vec_undef(<2 x float> %x, <2 x float> %y) {
-; CHECK-LABEL: @neg_mul_vec_undef(
-; CHECK-NEXT:    [[SUB:%.*]] = fneg <2 x float> [[X:%.*]]
-; CHECK-NEXT:    [[MUL:%.*]] = fmul <2 x float> [[SUB]], [[Y:%.*]]
-; CHECK-NEXT:    ret <2 x float> [[MUL]]
-;
-  %sub = fsub <2 x float> <float undef, float -0.0>, %x
-  %mul = fmul <2 x float> %sub, %y
-  ret <2 x float> %mul
-}
-
-; (0.0 - X) * Y
-define float @neg_sink_nsz(float %x, float %y) {
-; CHECK-LABEL: @neg_sink_nsz(
-; CHECK-NEXT:    [[SUB1:%.*]] = fneg nsz float [[X:%.*]]
-; CHECK-NEXT:    [[MUL:%.*]] = fmul float [[SUB1]], [[Y:%.*]]
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %sub1 = fsub nsz float 0.0, %x
-  %mul = fmul float %sub1, %y
-  ret float %mul
-}
-
-define float @neg_sink_multi_use(float %x, float %y) {
-; CHECK-LABEL: @neg_sink_multi_use(
-; CHECK-NEXT:    [[SUB1:%.*]] = fneg float [[X:%.*]]
-; CHECK-NEXT:    [[MUL:%.*]] = fmul float [[SUB1]], [[Y:%.*]]
-; CHECK-NEXT:    [[MUL2:%.*]] = fmul float [[MUL]], [[SUB1]]
-; CHECK-NEXT:    ret float [[MUL2]]
-;
-  %sub1 = fsub float -0.0, %x
-  %mul = fmul float %sub1, %y
-  %mul2 = fmul float %mul, %sub1
-  ret float %mul2
-}
-
-define float @unary_neg_mul_multi_use(float %x, float %y) {
-; CHECK-LABEL: @unary_neg_mul_multi_use(
-; CHECK-NEXT:    [[SUB1:%.*]] = fneg float [[X:%.*]]
-; CHECK-NEXT:    [[MUL:%.*]] = fmul float [[SUB1]], [[Y:%.*]]
-; CHECK-NEXT:    [[MUL2:%.*]] = fmul float [[MUL]], [[SUB1]]
-; CHECK-NEXT:    ret float [[MUL2]]
-;
-  %sub1 = fneg float %x
-  %mul = fmul float %sub1, %y
-  %mul2 = fmul float %mul, %sub1
-  ret float %mul2
-}
+; A copy of fmul.ll, with undef at insertelement/shufflevector replaced with
+; poison
 
 ; Don't crash when attempting to cast a constant FMul to an instruction.
 define void @test8(i32* %inout) {
@@ -406,764 +39,6 @@ for.end:                                          ; preds = %for.cond
   ret void
 }
 
-; X * -1.0 => -0.0 - X
-define float @test9(float %x) {
-; CHECK-LABEL: @test9(
-; CHECK-NEXT:    [[MUL:%.*]] = fneg float [[X:%.*]]
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %mul = fmul float %x, -1.0
-  ret float %mul
-}
-
-; PR18532
-define <4 x float> @test10(<4 x float> %x) {
-; CHECK-LABEL: @test10(
-; CHECK-NEXT:    [[MUL:%.*]] = fneg arcp afn <4 x float> [[X:%.*]]
-; CHECK-NEXT:    ret <4 x float> [[MUL]]
-;
-  %mul = fmul arcp afn <4 x float> %x, <float -1.0, float -1.0, float -1.0, float -1.0>
-  ret <4 x float> %mul
-}
-
-define float @test11(float %x, float %y) {
-; CHECK-LABEL: @test11(
-; CHECK-NEXT:    [[B:%.*]] = fadd fast float [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    [[C:%.*]] = fadd fast float [[B]], 3.000000e+00
-; CHECK-NEXT:    ret float [[C]]
-;
-  %a = fadd fast float %x, 1.0
-  %b = fadd fast float %y, 2.0
-  %c = fadd fast float %a, %b
-  ret float %c
-}
-
-declare double @llvm.sqrt.f64(double)
-
-; With unsafe/fast math, sqrt(X) * sqrt(X) is just X,
-; but make sure another use of the sqrt is intact.
-; Note that the remaining fmul is altered but is not 'fast'
-; itself because it was not marked 'fast' originally.
-; Thus, we have an overall fast result, but no more indication of
-; 'fast'ness in the code.
-define double @sqrt_squared2(double %f) {
-; CHECK-LABEL: @sqrt_squared2(
-; CHECK-NEXT:    [[SQRT:%.*]] = call double @llvm.sqrt.f64(double [[F:%.*]])
-; CHECK-NEXT:    [[MUL2:%.*]] = fmul double [[SQRT]], [[F]]
-; CHECK-NEXT:    ret double [[MUL2]]
-;
-  %sqrt = call double @llvm.sqrt.f64(double %f)
-  %mul1 = fmul fast double %sqrt, %sqrt
-  %mul2 = fmul double %mul1, %sqrt
-  ret double %mul2
-}
-
-declare float @llvm.fabs.f32(float) nounwind readnone
-
-define float @fabs_squared(float %x) {
-; CHECK-LABEL: @fabs_squared(
-; CHECK-NEXT:    [[MUL:%.*]] = fmul float [[X:%.*]], [[X]]
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %x.fabs = call float @llvm.fabs.f32(float %x)
-  %mul = fmul float %x.fabs, %x.fabs
-  ret float %mul
-}
-
-define float @fabs_squared_fast(float %x) {
-; CHECK-LABEL: @fabs_squared_fast(
-; CHECK-NEXT:    [[MUL:%.*]] = fmul fast float [[X:%.*]], [[X]]
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %x.fabs = call float @llvm.fabs.f32(float %x)
-  %mul = fmul fast float %x.fabs, %x.fabs
-  ret float %mul
-}
-
-define float @fabs_fabs(float %x, float %y) {
-; CHECK-LABEL: @fabs_fabs(
-; CHECK-NEXT:    [[TMP1:%.*]] = fmul float [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    [[MUL:%.*]] = call float @llvm.fabs.f32(float [[TMP1]])
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %x.fabs = call float @llvm.fabs.f32(float %x)
-  %y.fabs = call float @llvm.fabs.f32(float %y)
-  %mul = fmul float %x.fabs, %y.fabs
-  ret float %mul
-}
-
-define float @fabs_fabs_extra_use1(float %x, float %y) {
-; CHECK-LABEL: @fabs_fabs_extra_use1(
-; CHECK-NEXT:    [[X_FABS:%.*]] = call float @llvm.fabs.f32(float [[X:%.*]])
-; CHECK-NEXT:    call void @use_f32(float [[X_FABS]])
-; CHECK-NEXT:    [[TMP1:%.*]] = fmul ninf float [[X]], [[Y:%.*]]
-; CHECK-NEXT:    [[MUL:%.*]] = call ninf float @llvm.fabs.f32(float [[TMP1]])
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %x.fabs = call float @llvm.fabs.f32(float %x)
-  call void @use_f32(float %x.fabs)
-  %y.fabs = call float @llvm.fabs.f32(float %y)
-  %mul = fmul ninf float %x.fabs, %y.fabs
-  ret float %mul
-}
-
-define float @fabs_fabs_extra_use2(float %x, float %y) {
-; CHECK-LABEL: @fabs_fabs_extra_use2(
-; CHECK-NEXT:    [[Y_FABS:%.*]] = call fast float @llvm.fabs.f32(float [[Y:%.*]])
-; CHECK-NEXT:    call void @use_f32(float [[Y_FABS]])
-; CHECK-NEXT:    [[TMP1:%.*]] = fmul reassoc ninf float [[X:%.*]], [[Y]]
-; CHECK-NEXT:    [[MUL:%.*]] = call reassoc ninf float @llvm.fabs.f32(float [[TMP1]])
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %x.fabs = call fast float @llvm.fabs.f32(float %x)
-  %y.fabs = call fast float @llvm.fabs.f32(float %y)
-  call void @use_f32(float %y.fabs)
-  %mul = fmul reassoc ninf float %x.fabs, %y.fabs
-  ret float %mul
-}
-
-; negative test - don't create an extra instruction
-
-define float @fabs_fabs_extra_use3(float %x, float %y) {
-; CHECK-LABEL: @fabs_fabs_extra_use3(
-; CHECK-NEXT:    [[X_FABS:%.*]] = call float @llvm.fabs.f32(float [[X:%.*]])
-; CHECK-NEXT:    call void @use_f32(float [[X_FABS]])
-; CHECK-NEXT:    [[Y_FABS:%.*]] = call float @llvm.fabs.f32(float [[Y:%.*]])
-; CHECK-NEXT:    call void @use_f32(float [[Y_FABS]])
-; CHECK-NEXT:    [[MUL:%.*]] = fmul float [[X_FABS]], [[Y_FABS]]
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %x.fabs = call float @llvm.fabs.f32(float %x)
-  call void @use_f32(float %x.fabs)
-  %y.fabs = call float @llvm.fabs.f32(float %y)
-  call void @use_f32(float %y.fabs)
-  %mul = fmul float %x.fabs, %y.fabs
-  ret float %mul
-}
-
-; (X*Y) * X => (X*X) * Y
-; The transform only requires 'reassoc', but test other FMF in
-; the commuted variants to make sure FMF propagates as expected.
-
-define float @reassoc_common_operand1(float %x, float %y) {
-; CHECK-LABEL: @reassoc_common_operand1(
-; CHECK-NEXT:    [[TMP1:%.*]] = fmul reassoc float [[X:%.*]], [[X]]
-; CHECK-NEXT:    [[MUL2:%.*]] = fmul reassoc float [[TMP1]], [[Y:%.*]]
-; CHECK-NEXT:    ret float [[MUL2]]
-;
-  %mul1 = fmul float %x, %y
-  %mul2 = fmul reassoc float %mul1, %x
-  ret float %mul2
-}
-
-; (Y*X) * X => (X*X) * Y
-
-define float @reassoc_common_operand2(float %x, float %y) {
-; CHECK-LABEL: @reassoc_common_operand2(
-; CHECK-NEXT:    [[TMP1:%.*]] = fmul fast float [[X:%.*]], [[X]]
-; CHECK-NEXT:    [[MUL2:%.*]] = fmul fast float [[TMP1]], [[Y:%.*]]
-; CHECK-NEXT:    ret float [[MUL2]]
-;
-  %mul1 = fmul float %y, %x
-  %mul2 = fmul fast float %mul1, %x
-  ret float %mul2
-}
-
-; X * (X*Y) => (X*X) * Y
-
-define float @reassoc_common_operand3(float %x1, float %y) {
-; CHECK-LABEL: @reassoc_common_operand3(
-; CHECK-NEXT:    [[X:%.*]] = fdiv float [[X1:%.*]], 3.000000e+00
-; CHECK-NEXT:    [[TMP1:%.*]] = fmul reassoc nnan float [[X]], [[X]]
-; CHECK-NEXT:    [[MUL2:%.*]] = fmul reassoc nnan float [[TMP1]], [[Y:%.*]]
-; CHECK-NEXT:    ret float [[MUL2]]
-;
-  %x = fdiv float %x1, 3.0 ; thwart complexity-based canonicalization
-  %mul1 = fmul float %x, %y
-  %mul2 = fmul reassoc nnan float %x, %mul1
-  ret float %mul2
-}
-
-; X * (Y*X) => (X*X) * Y
-
-define float @reassoc_common_operand4(float %x1, float %y) {
-; CHECK-LABEL: @reassoc_common_operand4(
-; CHECK-NEXT:    [[X:%.*]] = fdiv float [[X1:%.*]], 3.000000e+00
-; CHECK-NEXT:    [[TMP1:%.*]] = fmul reassoc ninf float [[X]], [[X]]
-; CHECK-NEXT:    [[MUL2:%.*]] = fmul reassoc ninf float [[TMP1]], [[Y:%.*]]
-; CHECK-NEXT:    ret float [[MUL2]]
-;
-  %x = fdiv float %x1, 3.0 ; thwart complexity-based canonicalization
-  %mul1 = fmul float %y, %x
-  %mul2 = fmul reassoc ninf float %x, %mul1
-  ret float %mul2
-}
-
-; No change if the first fmul has another use.
-
-define float @reassoc_common_operand_multi_use(float %x, float %y) {
-; CHECK-LABEL: @reassoc_common_operand_multi_use(
-; CHECK-NEXT:    [[MUL1:%.*]] = fmul float [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    [[MUL2:%.*]] = fmul fast float [[MUL1]], [[X]]
-; CHECK-NEXT:    call void @use_f32(float [[MUL1]])
-; CHECK-NEXT:    ret float [[MUL2]]
-;
-  %mul1 = fmul float %x, %y
-  %mul2 = fmul fast float %mul1, %x
-  call void @use_f32(float %mul1)
-  ret float %mul2
-}
-
-declare float @llvm.log2.f32(float)
-
-; log2(Y * 0.5) * X = log2(Y) * X - X
-
-define float @log2half(float %x, float %y) {
-; CHECK-LABEL: @log2half(
-; CHECK-NEXT:    [[TMP1:%.*]] = call fast float @llvm.log2.f32(float [[Y:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = fmul fast float [[TMP1]], [[X:%.*]]
-; CHECK-NEXT:    [[MUL:%.*]] = fsub fast float [[TMP2]], [[X]]
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %halfy = fmul float %y, 0.5
-  %log2 = call float @llvm.log2.f32(float %halfy)
-  %mul = fmul fast float %log2, %x
-  ret float %mul
-}
-
-define float @log2half_commute(float %x1, float %y) {
-; CHECK-LABEL: @log2half_commute(
-; CHECK-NEXT:    [[TMP1:%.*]] = call fast float @llvm.log2.f32(float [[Y:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = fmul fast float [[TMP1]], [[X1:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = fsub fast float [[TMP2]], [[X1]]
-; CHECK-NEXT:    [[MUL:%.*]] = fmul fast float [[TMP3]], 0x3FC24924A0000000
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %x = fdiv float %x1, 7.0 ; thwart complexity-based canonicalization
-  %halfy = fmul float %y, 0.5
-  %log2 = call float @llvm.log2.f32(float %halfy)
-  %mul = fmul fast float %x, %log2
-  ret float %mul
-}
-
-; C1/X * C2 => (C1*C2) / X
-
-define float @fdiv_constant_numerator_fmul(float %x) {
-; CHECK-LABEL: @fdiv_constant_numerator_fmul(
-; CHECK-NEXT:    [[T3:%.*]] = fdiv reassoc float 1.200000e+07, [[X:%.*]]
-; CHECK-NEXT:    ret float [[T3]]
-;
-  %t1 = fdiv float 2.0e+3, %x
-  %t3 = fmul reassoc float %t1, 6.0e+3
-  ret float %t3
-}
-
-; C1/X * C2 => (C1*C2) / X is disabled if C1/X has multiple uses
-
-@fmul2_external = external global float
-
-define float @fdiv_constant_numerator_fmul_extra_use(float %x) {
-; CHECK-LABEL: @fdiv_constant_numerator_fmul_extra_use(
-; CHECK-NEXT:    [[DIV:%.*]] = fdiv fast float 1.000000e+00, [[X:%.*]]
-; CHECK-NEXT:    store float [[DIV]], float* @fmul2_external, align 4
-; CHECK-NEXT:    [[MUL:%.*]] = fmul fast float [[DIV]], 2.000000e+00
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %div = fdiv fast float 1.0, %x
-  store float %div, float* @fmul2_external
-  %mul = fmul fast float %div, 2.0
-  ret float %mul
-}
-
-; X/C1 * C2 => X * (C2/C1) (if C2/C1 is normal FP)
-
-define float @fdiv_constant_denominator_fmul(float %x) {
-; CHECK-LABEL: @fdiv_constant_denominator_fmul(
-; CHECK-NEXT:    [[T3:%.*]] = fmul reassoc float [[X:%.*]], 3.000000e+00
-; CHECK-NEXT:    ret float [[T3]]
-;
-  %t1 = fdiv float %x, 2.0e+3
-  %t3 = fmul reassoc float %t1, 6.0e+3
-  ret float %t3
-}
-
-define <4 x float> @fdiv_constant_denominator_fmul_vec(<4 x float> %x) {
-; CHECK-LABEL: @fdiv_constant_denominator_fmul_vec(
-; CHECK-NEXT:    [[T3:%.*]] = fmul reassoc <4 x float> [[X:%.*]], <float 3.000000e+00, float 2.000000e+00, float 1.000000e+00, float 1.000000e+00>
-; CHECK-NEXT:    ret <4 x float> [[T3]]
-;
-  %t1 = fdiv <4 x float> %x, <float 2.0e+3, float 3.0e+3, float 2.0e+3, float 1.0e+3>
-  %t3 = fmul reassoc <4 x float> %t1, <float 6.0e+3, float 6.0e+3, float 2.0e+3, float 1.0e+3>
-  ret <4 x float> %t3
-}
-
-; Make sure fmul with constant expression doesn't assert.
-
-define <4 x float> @fdiv_constant_denominator_fmul_vec_constexpr(<4 x float> %x) {
-; CHECK-LABEL: @fdiv_constant_denominator_fmul_vec_constexpr(
-; CHECK-NEXT:    [[T3:%.*]] = fmul reassoc <4 x float> [[X:%.*]], <float 3.000000e+00, float 2.000000e+00, float 1.000000e+00, float 1.000000e+00>
-; CHECK-NEXT:    ret <4 x float> [[T3]]
-;
-  %constExprMul = bitcast i128 trunc (i160 bitcast (<5 x float> <float 6.0e+3, float 6.0e+3, float 2.0e+3, float 1.0e+3, float undef> to i160) to i128) to <4 x float>
-  %t1 = fdiv <4 x float> %x, <float 2.0e+3, float 3.0e+3, float 2.0e+3, float 1.0e+3>
-  %t3 = fmul reassoc <4 x float> %t1, %constExprMul
-  ret <4 x float> %t3
-}
-
-; This shows that at least part of instcombine does not check constant
-; values to see if it is creating denorms (0x3800000000000000 is a denorm
-; for 32-bit float), so protecting against denorms in other parts is
-; probably not doing the intended job.
-
-define float @fmul_constant_reassociation(float %x) {
-; CHECK-LABEL: @fmul_constant_reassociation(
-; CHECK-NEXT:    [[R:%.*]] = fmul reassoc nsz float [[X:%.*]], 0x3800000000000000
-; CHECK-NEXT:    ret float [[R]]
-;
-  %mul_flt_min = fmul reassoc nsz float %x, 0x3810000000000000
-  %r = fmul reassoc nsz float  %mul_flt_min, 0.5
-  ret float %r
-}
-
-; Canonicalization "X/C1 * C2 => X * (C2/C1)" still applies if C2/C1 is denormal
-; (otherwise, we should not have allowed the reassociation in the previous test).
-; 0x3810000000000000 == FLT_MIN
-
-define float @fdiv_constant_denominator_fmul_denorm(float %x) {
-; CHECK-LABEL: @fdiv_constant_denominator_fmul_denorm(
-; CHECK-NEXT:    [[T3:%.*]] = fmul fast float [[X:%.*]], 0x3760620000000000
-; CHECK-NEXT:    ret float [[T3]]
-;
-  %t1 = fdiv float %x, 2.0e+3
-  %t3 = fmul fast float %t1, 0x3810000000000000
-  ret float %t3
-}
-
-; X / C1 * C2 => X / (C2/C1) if C1/C2 is abnormal, but C2/C1 is a normal value.
-; TODO: We don't convert the fast fdiv to fmul because that would be multiplication
-; by a denormal, but we could do better when we know that denormals are not a problem.
-
-define float @fdiv_constant_denominator_fmul_denorm_try_harder(float %x) {
-; CHECK-LABEL: @fdiv_constant_denominator_fmul_denorm_try_harder(
-; CHECK-NEXT:    [[T3:%.*]] = fdiv reassoc float [[X:%.*]], 0x47E8000000000000
-; CHECK-NEXT:    ret float [[T3]]
-;
-  %t1 = fdiv float %x, 3.0
-  %t3 = fmul reassoc float %t1, 0x3810000000000000
-  ret float %t3
-}
-
-; Negative test: we should not have 2 divisions instead of the 1 we started with.
-
-define float @fdiv_constant_denominator_fmul_denorm_try_harder_extra_use(float %x) {
-; CHECK-LABEL: @fdiv_constant_denominator_fmul_denorm_try_harder_extra_use(
-; CHECK-NEXT:    [[T1:%.*]] = fdiv float [[X:%.*]], 3.000000e+00
-; CHECK-NEXT:    [[T3:%.*]] = fmul fast float [[T1]], 0x3810000000000000
-; CHECK-NEXT:    [[R:%.*]] = fadd float [[T1]], [[T3]]
-; CHECK-NEXT:    ret float [[R]]
-;
-  %t1 = fdiv float %x, 3.0e+0
-  %t3 = fmul fast float %t1, 0x3810000000000000
-  %r = fadd float %t1, %t3
-  ret float %r
-}
-
-; (X + C1) * C2 --> (X * C2) + C1*C2
-
-define float @fmul_fadd_distribute(float %x) {
-; CHECK-LABEL: @fmul_fadd_distribute(
-; CHECK-NEXT:    [[TMP1:%.*]] = fmul reassoc float [[X:%.*]], 3.000000e+00
-; CHECK-NEXT:    [[T3:%.*]] = fadd reassoc float [[TMP1]], 6.000000e+00
-; CHECK-NEXT:    ret float [[T3]]
-;
-  %t2 = fadd float %x, 2.0
-  %t3 = fmul reassoc float %t2, 3.0
-  ret float %t3
-}
-
-; (X - C1) * C2 --> (X * C2) - C1*C2
-
-define float @fmul_fsub_distribute1(float %x) {
-; CHECK-LABEL: @fmul_fsub_distribute1(
-; CHECK-NEXT:    [[TMP1:%.*]] = fmul reassoc float [[X:%.*]], 3.000000e+00
-; CHECK-NEXT:    [[T3:%.*]] = fadd reassoc float [[TMP1]], -6.000000e+00
-; CHECK-NEXT:    ret float [[T3]]
-;
-  %t2 = fsub float %x, 2.0
-  %t3 = fmul reassoc float %t2, 3.0
-  ret float %t3
-}
-
-; (C1 - X) * C2 --> C1*C2 - (X * C2)
-
-define float @fmul_fsub_distribute2(float %x) {
-; CHECK-LABEL: @fmul_fsub_distribute2(
-; CHECK-NEXT:    [[TMP1:%.*]] = fmul reassoc float [[X:%.*]], 3.000000e+00
-; CHECK-NEXT:    [[T3:%.*]] = fsub reassoc float 6.000000e+00, [[TMP1]]
-; CHECK-NEXT:    ret float [[T3]]
-;
-  %t2 = fsub float 2.0, %x
-  %t3 = fmul reassoc float %t2, 3.0
-  ret float %t3
-}
-
-; FIXME: This should only need 'reassoc'.
-; ((X*C1) + C2) * C3 => (X * (C1*C3)) + (C2*C3)
-
-define float @fmul_fadd_fmul_distribute(float %x) {
-; CHECK-LABEL: @fmul_fadd_fmul_distribute(
-; CHECK-NEXT:    [[TMP1:%.*]] = fmul fast float [[X:%.*]], 3.000000e+01
-; CHECK-NEXT:    [[T3:%.*]] = fadd fast float [[TMP1]], 1.000000e+01
-; CHECK-NEXT:    ret float [[T3]]
-;
-  %t1 = fmul float %x, 6.0
-  %t2 = fadd float %t1, 2.0
-  %t3 = fmul fast float %t2, 5.0
-  ret float %t3
-}
-
-define float @fmul_fadd_distribute_extra_use(float %x) {
-; CHECK-LABEL: @fmul_fadd_distribute_extra_use(
-; CHECK-NEXT:    [[T1:%.*]] = fmul float [[X:%.*]], 6.000000e+00
-; CHECK-NEXT:    [[T2:%.*]] = fadd float [[T1]], 2.000000e+00
-; CHECK-NEXT:    [[T3:%.*]] = fmul fast float [[T2]], 5.000000e+00
-; CHECK-NEXT:    call void @use_f32(float [[T2]])
-; CHECK-NEXT:    ret float [[T3]]
-;
-  %t1 = fmul float %x, 6.0
-  %t2 = fadd float %t1, 2.0
-  %t3 = fmul fast float %t2, 5.0
-  call void @use_f32(float %t2)
-  ret float %t3
-}
-
-; (X/C1 + C2) * C3 => X/(C1/C3) + C2*C3
-; 0x10000000000000 = DBL_MIN
-; TODO: We don't convert the fast fdiv to fmul because that would be multiplication
-; by a denormal, but we could do better when we know that denormals are not a problem.
-
-define double @fmul_fadd_fdiv_distribute2(double %x) {
-; CHECK-LABEL: @fmul_fadd_fdiv_distribute2(
-; CHECK-NEXT:    [[TMP1:%.*]] = fdiv reassoc double [[X:%.*]], 0x7FE8000000000000
-; CHECK-NEXT:    [[T3:%.*]] = fadd reassoc double [[TMP1]], 0x34000000000000
-; CHECK-NEXT:    ret double [[T3]]
-;
-  %t1 = fdiv double %x, 3.0
-  %t2 = fadd double %t1, 5.0
-  %t3 = fmul reassoc double %t2, 0x10000000000000
-  ret double %t3
-}
-
-; 5.0e-1 * DBL_MIN yields denormal, so "(f1*3.0 + 5.0e-1) * DBL_MIN" cannot
-; be simplified into f1 * (3.0*DBL_MIN) + (5.0e-1*DBL_MIN)
-
-define double @fmul_fadd_fdiv_distribute3(double %x) {
-; CHECK-LABEL: @fmul_fadd_fdiv_distribute3(
-; CHECK-NEXT:    [[TMP1:%.*]] = fdiv reassoc double [[X:%.*]], 0x7FE8000000000000
-; CHECK-NEXT:    [[T3:%.*]] = fadd reassoc double [[TMP1]], 0x34000000000000
-; CHECK-NEXT:    ret double [[T3]]
-;
-  %t1 = fdiv double %x, 3.0
-  %t2 = fadd double %t1, 5.0
-  %t3 = fmul reassoc double %t2, 0x10000000000000
-  ret double %t3
-}
-
-; FIXME: This should only need 'reassoc'.
-; (C2 - (X*C1)) * C3 => (C2*C3) - (X * (C1*C3))
-
-define float @fmul_fsub_fmul_distribute(float %x) {
-; CHECK-LABEL: @fmul_fsub_fmul_distribute(
-; CHECK-NEXT:    [[TMP1:%.*]] = fmul fast float [[X:%.*]], 3.000000e+01
-; CHECK-NEXT:    [[T3:%.*]] = fsub fast float 1.000000e+01, [[TMP1]]
-; CHECK-NEXT:    ret float [[T3]]
-;
-  %t1 = fmul float %x, 6.0
-  %t2 = fsub float 2.0, %t1
-  %t3 = fmul fast float %t2, 5.0
-  ret float %t3
-}
-
-define float @fmul_fsub_fmul_distribute_extra_use(float %x) {
-; CHECK-LABEL: @fmul_fsub_fmul_distribute_extra_use(
-; CHECK-NEXT:    [[T1:%.*]] = fmul float [[X:%.*]], 6.000000e+00
-; CHECK-NEXT:    [[T2:%.*]] = fsub float 2.000000e+00, [[T1]]
-; CHECK-NEXT:    [[T3:%.*]] = fmul fast float [[T2]], 5.000000e+00
-; CHECK-NEXT:    call void @use_f32(float [[T2]])
-; CHECK-NEXT:    ret float [[T3]]
-;
-  %t1 = fmul float %x, 6.0
-  %t2 = fsub float 2.0, %t1
-  %t3 = fmul fast float %t2, 5.0
-  call void @use_f32(float %t2)
-  ret float %t3
-}
-
-; FIXME: This should only need 'reassoc'.
-; ((X*C1) - C2) * C3 => (X * (C1*C3)) - C2*C3
-
-define float @fmul_fsub_fmul_distribute2(float %x) {
-; CHECK-LABEL: @fmul_fsub_fmul_distribute2(
-; CHECK-NEXT:    [[TMP1:%.*]] = fmul fast float [[X:%.*]], 3.000000e+01
-; CHECK-NEXT:    [[T3:%.*]] = fadd fast float [[TMP1]], -1.000000e+01
-; CHECK-NEXT:    ret float [[T3]]
-;
-  %t1 = fmul float %x, 6.0
-  %t2 = fsub float %t1, 2.0
-  %t3 = fmul fast float %t2, 5.0
-  ret float %t3
-}
-
-define float @fmul_fsub_fmul_distribute2_extra_use(float %x) {
-; CHECK-LABEL: @fmul_fsub_fmul_distribute2_extra_use(
-; CHECK-NEXT:    [[T1:%.*]] = fmul float [[X:%.*]], 6.000000e+00
-; CHECK-NEXT:    [[T2:%.*]] = fsub float 2.000000e+00, [[T1]]
-; CHECK-NEXT:    [[T3:%.*]] = fmul fast float [[T2]], 5.000000e+00
-; CHECK-NEXT:    call void @use_f32(float [[T2]])
-; CHECK-NEXT:    ret float [[T3]]
-;
-  %t1 = fmul float %x, 6.0
-  %t2 = fsub float 2.0, %t1
-  %t3 = fmul fast float %t2, 5.0
-  call void @use_f32(float %t2)
-  ret float %t3
-}
-
-; "(X*Y) * X => (X*X) * Y" is disabled if "X*Y" has multiple uses
-
-define float @common_factor(float %x, float %y) {
-; CHECK-LABEL: @common_factor(
-; CHECK-NEXT:    [[MUL:%.*]] = fmul float [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    [[MUL1:%.*]] = fmul fast float [[MUL]], [[X]]
-; CHECK-NEXT:    [[ADD:%.*]] = fadd float [[MUL1]], [[MUL]]
-; CHECK-NEXT:    ret float [[ADD]]
-;
-  %mul = fmul float %x, %y
-  %mul1 = fmul fast float %mul, %x
-  %add = fadd float %mul1, %mul
-  ret float %add
-}
-
-define double @fmul_fdiv_factor_squared(double %x, double %y) {
-; CHECK-LABEL: @fmul_fdiv_factor_squared(
-; CHECK-NEXT:    [[DIV:%.*]] = fdiv fast double [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    [[SQUARED:%.*]] = fmul fast double [[DIV]], [[DIV]]
-; CHECK-NEXT:    ret double [[SQUARED]]
-;
-  %div = fdiv fast double %x, %y
-  %squared = fmul fast double %div, %div
-  ret double %squared
-}
-
-define double @fmul_fdivs_factor_common_denominator(double %x, double %y, double %z) {
-; CHECK-LABEL: @fmul_fdivs_factor_common_denominator(
-; CHECK-NEXT:    [[TMP1:%.*]] = fmul fast double [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT:    [[TMP2:%.*]] = fmul fast double [[Z:%.*]], [[Z]]
-; CHECK-NEXT:    [[MUL:%.*]] = fdiv fast double [[TMP1]], [[TMP2]]
-; CHECK-NEXT:    ret double [[MUL]]
-;
-  %div1 = fdiv double %x, %z
-  %div2 = fdiv double %y, %z
-  %mul = fmul fast double %div1, %div2
-  ret double %mul
-}
-
-define double @fmul_fdivs_factor(double %x, double %y, double %z, double %w) {
-; CHECK-LABEL: @fmul_fdivs_factor(
-; CHECK-NEXT:    [[TMP1:%.*]] = fmul reassoc double [[Z:%.*]], [[X:%.*]]
-; CHECK-NEXT:    [[TMP2:%.*]] = fdiv reassoc double [[TMP1]], [[W:%.*]]
-; CHECK-NEXT:    [[MUL:%.*]] = fdiv reassoc double [[TMP2]], [[Y:%.*]]
-; CHECK-NEXT:    ret double [[MUL]]
-;
-  %div1 = fdiv double %x, %y
-  %div2 = fdiv double %z, %w
-  %mul = fmul reassoc double %div1, %div2
-  ret double %mul
-}
-
-define double @fmul_fdiv_factor(double %x, double %y, double %z) {
-; CHECK-LABEL: @fmul_fdiv_factor(
-; CHECK-NEXT:    [[TMP1:%.*]] = fmul reassoc double [[X:%.*]], [[Z:%.*]]
-; CHECK-NEXT:    [[MUL:%.*]] = fdiv reassoc double [[TMP1]], [[Y:%.*]]
-; CHECK-NEXT:    ret double [[MUL]]
-;
-  %div = fdiv double %x, %y
-  %mul = fmul reassoc double %div, %z
-  ret double %mul
-}
-
-define double @fmul_fdiv_factor_constant1(double %x, double %y) {
-; CHECK-LABEL: @fmul_fdiv_factor_constant1(
-; CHECK-NEXT:    [[TMP1:%.*]] = fmul reassoc double [[X:%.*]], 4.200000e+01
-; CHECK-NEXT:    [[MUL:%.*]] = fdiv reassoc double [[TMP1]], [[Y:%.*]]
-; CHECK-NEXT:    ret double [[MUL]]
-;
-  %div = fdiv double %x, %y
-  %mul = fmul reassoc double %div, 42.0
-  ret double %mul
-}
-
-define <2 x float> @fmul_fdiv_factor_constant2(<2 x float> %x, <2 x float> %y) {
-; CHECK-LABEL: @fmul_fdiv_factor_constant2(
-; CHECK-NEXT:    [[TMP1:%.*]] = fmul reassoc <2 x float> [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    [[MUL:%.*]] = fdiv reassoc <2 x float> [[TMP1]], <float 4.200000e+01, float 1.200000e+01>
-; CHECK-NEXT:    ret <2 x float> [[MUL]]
-;
-  %div = fdiv <2 x float> %x, <float 42.0, float 12.0>
-  %mul = fmul reassoc <2 x float> %div, %y
-  ret <2 x float> %mul
-}
-
-define float @fmul_fdiv_factor_extra_use(float %x, float %y) {
-; CHECK-LABEL: @fmul_fdiv_factor_extra_use(
-; CHECK-NEXT:    [[DIV:%.*]] = fdiv float [[X:%.*]], 4.200000e+01
-; CHECK-NEXT:    call void @use_f32(float [[DIV]])
-; CHECK-NEXT:    [[MUL:%.*]] = fmul reassoc float [[DIV]], [[Y:%.*]]
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %div = fdiv float %x, 42.0
-  call void @use_f32(float %div)
-  %mul = fmul reassoc float %div, %y
-  ret float %mul
-}
-
-; Avoid infinite looping by moving negation out of a constant expression.
-
-@g = external global {[2 x i8*]}, align 1
-
-define double @fmul_negated_constant_expression(double %x) {
-; CHECK-LABEL: @fmul_negated_constant_expression(
-; CHECK-NEXT:    [[R:%.*]] = fmul double [[X:%.*]], fsub (double -0.000000e+00, double bitcast (i64 ptrtoint (i8** getelementptr inbounds ({ [2 x i8*] }, { [2 x i8*] }* @g, i64 0, inrange i32 0, i64 2) to i64) to double))
-; CHECK-NEXT:    ret double [[R]]
-;
-  %r = fmul double %x, fsub (double -0.000000e+00, double bitcast (i64 ptrtoint (i8** getelementptr inbounds ({ [2 x i8*] }, { [2 x i8*] }* @g, i64 0, inrange i32 0, i64 2) to i64) to double))
-  ret double %r
-}
-
-define float @negate_if_true(float %x, i1 %cond) {
-; CHECK-LABEL: @negate_if_true(
-; CHECK-NEXT:    [[TMP1:%.*]] = fneg float [[X:%.*]]
-; CHECK-NEXT:    [[TMP2:%.*]] = select i1 [[COND:%.*]], float [[TMP1]], float [[X]]
-; CHECK-NEXT:    ret float [[TMP2]]
-;
-  %sel = select i1 %cond, float -1.0, float 1.0
-  %r = fmul float %sel, %x
-  ret float %r
-}
-
-define float @negate_if_false(float %x, i1 %cond) {
-; CHECK-LABEL: @negate_if_false(
-; CHECK-NEXT:    [[TMP1:%.*]] = fneg arcp float [[X:%.*]]
-; CHECK-NEXT:    [[TMP2:%.*]] = select arcp i1 [[COND:%.*]], float [[X]], float [[TMP1]]
-; CHECK-NEXT:    ret float [[TMP2]]
-;
-  %sel = select i1 %cond, float 1.0, float -1.0
-  %r = fmul arcp float %sel, %x
-  ret float %r
-}
-
-define <2 x double> @negate_if_true_commute(<2 x double> %px, i1 %cond) {
-; CHECK-LABEL: @negate_if_true_commute(
-; CHECK-NEXT:    [[X:%.*]] = fdiv <2 x double> <double 4.200000e+01, double 4.200000e+01>, [[PX:%.*]]
-; CHECK-NEXT:    [[TMP1:%.*]] = fneg ninf <2 x double> [[X]]
-; CHECK-NEXT:    [[TMP2:%.*]] = select ninf i1 [[COND:%.*]], <2 x double> [[TMP1]], <2 x double> [[X]]
-; CHECK-NEXT:    ret <2 x double> [[TMP2]]
-;
-  %x = fdiv <2 x double> <double 42.0, double 42.0>, %px  ; thwart complexity-based canonicalization
-  %sel = select i1 %cond, <2 x double> <double -1.0, double -1.0>, <2 x double> <double 1.0, double 1.0>
-  %r = fmul ninf <2 x double> %x, %sel
-  ret <2 x double> %r
-}
-
-define <2 x double> @negate_if_false_commute(<2 x double> %px, <2 x i1> %cond) {
-; CHECK-LABEL: @negate_if_false_commute(
-; CHECK-NEXT:    [[X:%.*]] = fdiv <2 x double> <double 4.200000e+01, double 5.100000e+00>, [[PX:%.*]]
-; CHECK-NEXT:    [[TMP1:%.*]] = fneg <2 x double> [[X]]
-; CHECK-NEXT:    [[TMP2:%.*]] = select <2 x i1> [[COND:%.*]], <2 x double> [[X]], <2 x double> [[TMP1]]
-; CHECK-NEXT:    ret <2 x double> [[TMP2]]
-;
-  %x = fdiv <2 x double> <double 42.0, double 5.1>, %px  ; thwart complexity-based canonicalization
-  %sel = select <2 x i1> %cond, <2 x double> <double 1.0, double 1.0>, <2 x double> <double -1.0, double -1.0>
-  %r = fmul <2 x double> %x, %sel
-  ret <2 x double> %r
-}
-
-; Negative test
-
-define float @negate_if_true_extra_use(float %x, i1 %cond) {
-; CHECK-LABEL: @negate_if_true_extra_use(
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[COND:%.*]], float -1.000000e+00, float 1.000000e+00
-; CHECK-NEXT:    call void @use_f32(float [[SEL]])
-; CHECK-NEXT:    [[R:%.*]] = fmul float [[SEL]], [[X:%.*]]
-; CHECK-NEXT:    ret float [[R]]
-;
-  %sel = select i1 %cond, float -1.0, float 1.0
-  call void @use_f32(float %sel)
-  %r = fmul float %sel, %x
-  ret float %r
-}
-
-; Negative test
-
-define <2 x double> @negate_if_true_wrong_constant(<2 x double> %px, i1 %cond) {
-; CHECK-LABEL: @negate_if_true_wrong_constant(
-; CHECK-NEXT:    [[X:%.*]] = fdiv <2 x double> <double 4.200000e+01, double 4.200000e+01>, [[PX:%.*]]
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[COND:%.*]], <2 x double> <double -1.000000e+00, double 0.000000e+00>, <2 x double> <double 1.000000e+00, double 1.000000e+00>
-; CHECK-NEXT:    [[R:%.*]] = fmul <2 x double> [[X]], [[SEL]]
-; CHECK-NEXT:    ret <2 x double> [[R]]
-;
-  %x = fdiv <2 x double> <double 42.0, double 42.0>, %px  ; thwart complexity-based canonicalization
-  %sel = select i1 %cond, <2 x double> <double -1.0, double 0.0>, <2 x double> <double 1.0, double 1.0>
-  %r = fmul <2 x double> %x, %sel
-  ret <2 x double> %r
-}
-
-; X *fast (C ? 1.0 : 0.0) -> C ? X : 0.0
-define float @fmul_select(float %x, i1 %c) {
-; CHECK-LABEL: @fmul_select(
-; CHECK-NEXT:    [[MUL:%.*]] = select fast i1 [[C:%.*]], float [[X:%.*]], float 0.000000e+00
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %sel = select i1 %c, float 1.0, float 0.0
-  %mul = fmul fast float %sel, %x
-  ret float %mul
-}
-
-; X *fast (C ? 1.0 : 0.0) -> C ? X : 0.0
-define <2 x float> @fmul_select_vec(<2 x float> %x, i1 %c) {
-; CHECK-LABEL: @fmul_select_vec(
-; CHECK-NEXT:    [[MUL:%.*]] = select fast i1 [[C:%.*]], <2 x float> [[X:%.*]], <2 x float> zeroinitializer
-; CHECK-NEXT:    ret <2 x float> [[MUL]]
-;
-  %sel = select i1 %c, <2 x float> <float 1.0, float 1.0>, <2 x float> zeroinitializer
-  %mul = fmul fast <2 x float> %sel, %x
-  ret <2 x float> %mul
-}
-
-; Without fast math flags we can't optimize X * (C ? 1.0 : 0.0) -> C ? X : 0.0
-define float @fmul_select_strict(float %x, i1 %c) {
-; CHECK-LABEL: @fmul_select_strict(
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[C:%.*]], float 1.000000e+00, float 0.000000e+00
-; CHECK-NEXT:    [[MUL:%.*]] = fmul float [[SEL]], [[X:%.*]]
-; CHECK-NEXT:    ret float [[MUL]]
-;
-  %sel = select i1 %c, float 1.0, float 0.0
-  %mul = fmul float %sel, %x
-  ret float %mul
-}
-
-; sqrt(X) *fast (C ? sqrt(X) : 1.0) -> C ? X : sqrt(X)
-define double @fmul_sqrt_select(double %x, i1 %c) {
-; CHECK-LABEL: @fmul_sqrt_select(
-; CHECK-NEXT:    [[SQR:%.*]] = call double @llvm.sqrt.f64(double [[X:%.*]])
-; CHECK-NEXT:    [[MUL:%.*]] = select fast i1 [[C:%.*]], double [[X]], double [[SQR]]
-; CHECK-NEXT:    ret double [[MUL]]
-;
-  %sqr = call double @llvm.sqrt.f64(double %x)
-  %sel = select i1 %c, double %sqr, double 1.0
-  %mul = fmul fast double %sqr, %sel
-  ret double %mul
-}
 
 ; fastmath => z * splat(0) = splat(0), even for scalable vectors
 define <vscale x 2 x float> @mul_scalable_splat_zero(<vscale x 2 x float> %z) {


        


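(Illustrative sketch, not part of the patch above: an *-inseltpoison.ll file is the same test with the undef placeholder operand of insertelement/shufflevector switched to poison. The two functions below are made-up examples of that before/after idiom; the functions removed by this diff contain no such placeholder, which is why they are redundant in the poison copy.)

; Placeholder written with undef (the original test idiom):
define <4 x float> @splat_via_undef(float %x) {
  %ins = insertelement <4 x float> undef, float %x, i32 0
  %splat = shufflevector <4 x float> %ins, <4 x float> undef, <4 x i32> zeroinitializer
  ret <4 x float> %splat
}

; The same idiom as it appears in an *-inseltpoison.ll copy, with poison as the placeholder:
define <4 x float> @splat_via_poison(float %x) {
  %ins = insertelement <4 x float> poison, float %x, i32 0
  %splat = shufflevector <4 x float> %ins, <4 x float> poison, <4 x i32> zeroinitializer
  ret <4 x float> %splat
}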