[llvm] 726130e - [NFC][InstCombine] Improve test coverage for invertible extra uses of hands of logical op

Roman Lebedev via llvm-commits llvm-commits at lists.llvm.org
Sun Dec 18 18:01:33 PST 2022


Author: Roman Lebedev
Date: 2022-12-19T04:55:31+03:00
New Revision: 726130ee935d31a80e37526d0182a3a6df59b82f

URL: https://github.com/llvm/llvm-project/commit/726130ee935d31a80e37526d0182a3a6df59b82f
DIFF: https://github.com/llvm/llvm-project/commit/726130ee935d31a80e37526d0182a3a6df59b82f.diff

LOG: [NFC][InstCombine] Improve test coverage for invertible extra uses of hands of logical op

While there, reduce the bitwidth; we really don't care about testing i32 specifically.
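
For reference, a minimal sketch of the fold these tests exercise (value
names here are hypothetical; the shape mirrors @t0 in
sink-not-into-another-hand-of-and.ll below): a 'not' on one hand of an
and/or can be sunk into the other hand iff that hand is free to invert
and every user of the result can absorb the inversion, e.g. a select
absorbs it by swapping its arms:

  ; before: %z = ~%x & %y, where %y and all users of %z are free to invert
  %y    = icmp eq i8 %a, %b
  %notx = xor i1 %x, -1
  %z    = and i1 %notx, %y
  %r    = select i1 %z, i8 %t, i8 %f

  ; after: by De Morgan, ~%z == %x | ~%y, so invert %y and swap the select arms
  %noty = icmp ne i8 %a, %b
  %notz = or i1 %noty, %x
  %r    = select i1 %notz, i8 %f, i8 %t

The new @t4/@t4_commutative tests give %i1 an extra use that is itself
invertible (a select whose arms could be swapped, fed to a call); per
their CHECK lines the fold does not fire there yet, which is the case
the old @n4 carried a FIXME about.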

Added: 
    

Modified: 
    llvm/test/Transforms/InstCombine/sink-not-into-another-hand-of-and.ll
    llvm/test/Transforms/InstCombine/sink-not-into-another-hand-of-logical-and.ll
    llvm/test/Transforms/InstCombine/sink-not-into-another-hand-of-logical-or.ll
    llvm/test/Transforms/InstCombine/sink-not-into-another-hand-of-or.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/InstCombine/sink-not-into-another-hand-of-and.ll b/llvm/test/Transforms/InstCombine/sink-not-into-another-hand-of-and.ll
index 9c83ff18ff77..090e832d8f43 100644
--- a/llvm/test/Transforms/InstCombine/sink-not-into-another-hand-of-and.ll
+++ b/llvm/test/Transforms/InstCombine/sink-not-into-another-hand-of-and.ll
@@ -8,87 +8,106 @@
 ; iff y is free to invert and all uses of z can be freely updated.
 
 declare void @use1(i1)
+declare void @use8(i8)
 
 ; Most basic positive test
-define i32 @t0(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3) {
+define i8 @t0(i1 %i0, i8 %v0, i8 %v1, i8 %v2, i8 %v3) {
 ; CHECK-LABEL: @t0(
-; CHECK-NEXT:    [[I1:%.*]] = icmp ne i32 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I1:%.*]] = icmp ne i8 [[V0:%.*]], [[V1:%.*]]
 ; CHECK-NEXT:    [[I3_NOT:%.*]] = or i1 [[I1]], [[I0:%.*]]
-; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3_NOT]], i32 [[V3:%.*]], i32 [[V2:%.*]]
-; CHECK-NEXT:    ret i32 [[I4]]
+; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3_NOT]], i8 [[V3:%.*]], i8 [[V2:%.*]]
+; CHECK-NEXT:    ret i8 [[I4]]
 ;
-  %i1 = icmp eq i32 %v0, %v1
+  %i1 = icmp eq i8 %v0, %v1
   %i2 = xor i1 %i0, -1
   %i3 = and i1 %i2, %i1
-  %i4 = select i1 %i3, i32 %v2, i32 %v3
-  ret i32 %i4
+  %i4 = select i1 %i3, i8 %v2, i8 %v3
+  ret i8 %i4
 }
-define i32 @t1(i32 %v0, i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5) {
+define i8 @t1(i8 %v0, i8 %v1, i8 %v2, i8 %v3, i8 %v4, i8 %v5) {
 ; CHECK-LABEL: @t1(
-; CHECK-NEXT:    [[I0:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
-; CHECK-NEXT:    [[I1:%.*]] = icmp ne i32 [[V2:%.*]], [[V3:%.*]]
+; CHECK-NEXT:    [[I0:%.*]] = icmp eq i8 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I1:%.*]] = icmp ne i8 [[V2:%.*]], [[V3:%.*]]
 ; CHECK-NEXT:    call void @use1(i1 [[I0]])
 ; CHECK-NEXT:    [[I3_NOT:%.*]] = or i1 [[I1]], [[I0]]
-; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3_NOT]], i32 [[V5:%.*]], i32 [[V4:%.*]]
-; CHECK-NEXT:    ret i32 [[I4]]
+; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3_NOT]], i8 [[V5:%.*]], i8 [[V4:%.*]]
+; CHECK-NEXT:    ret i8 [[I4]]
 ;
-  %i0 = icmp eq i32 %v0, %v1
-  %i1 = icmp eq i32 %v2, %v3
+  %i0 = icmp eq i8 %v0, %v1
+  %i1 = icmp eq i8 %v2, %v3
   call void @use1(i1 %i0)
   %i2 = xor i1 %i0, -1
   %i3 = and i1 %i2, %i1
-  %i4 = select i1 %i3, i32 %v4, i32 %v5
-  ret i32 %i4
+  %i4 = select i1 %i3, i8 %v4, i8 %v5
+  ret i8 %i4
 }
 
 ; All users of %i3 must be invertible
-define i1 @n2(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3) {
+define i1 @n2(i1 %i0, i8 %v0, i8 %v1, i8 %v2, i8 %v3) {
 ; CHECK-LABEL: @n2(
-; CHECK-NEXT:    [[I1:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I1:%.*]] = icmp eq i8 [[V0:%.*]], [[V1:%.*]]
 ; CHECK-NEXT:    [[I2:%.*]] = xor i1 [[I0:%.*]], true
 ; CHECK-NEXT:    [[I3:%.*]] = and i1 [[I1]], [[I2]]
 ; CHECK-NEXT:    ret i1 [[I3]]
 ;
-  %i1 = icmp eq i32 %v0, %v1
+  %i1 = icmp eq i8 %v0, %v1
   %i2 = xor i1 %i0, -1
   %i3 = and i1 %i2, %i1
   ret i1 %i3 ; can not be inverted
 }
 
 ; %i1 must be invertible
-define i32 @n3(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3) {
+define i8 @n3(i1 %i0, i8 %v0, i8 %v1, i8 %v2, i8 %v3) {
 ; CHECK-LABEL: @n3(
-; CHECK-NEXT:    [[I1:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I1:%.*]] = icmp eq i8 [[V0:%.*]], [[V1:%.*]]
 ; CHECK-NEXT:    call void @use1(i1 [[I1]])
 ; CHECK-NEXT:    [[I2:%.*]] = xor i1 [[I0:%.*]], true
 ; CHECK-NEXT:    [[I3:%.*]] = and i1 [[I1]], [[I2]]
-; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3]], i32 [[V2:%.*]], i32 [[V3:%.*]]
-; CHECK-NEXT:    ret i32 [[I4]]
+; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3]], i8 [[V2:%.*]], i8 [[V3:%.*]]
+; CHECK-NEXT:    ret i8 [[I4]]
 ;
-  %i1 = icmp eq i32 %v0, %v1 ; has extra uninvertible use
+  %i1 = icmp eq i8 %v0, %v1 ; has extra uninvertible use
   call void @use1(i1 %i1) ; bad extra use
   %i2 = xor i1 %i0, -1
   %i3 = and i1 %i2, %i1
-  %i4 = select i1 %i3, i32 %v2, i32 %v3
-  ret i32 %i4
+  %i4 = select i1 %i3, i8 %v2, i8 %v3
+  ret i8 %i4
 }
 
-; FIXME: we could invert all uses of %i1 here
-define i32 @n4(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5, ptr %dst) {
-; CHECK-LABEL: @n4(
-; CHECK-NEXT:    [[I1:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
-; CHECK-NEXT:    [[I2:%.*]] = select i1 [[I1]], i32 [[V2:%.*]], i32 [[V3:%.*]]
-; CHECK-NEXT:    store i32 [[I2]], ptr [[DST:%.*]], align 4
+; Extra uses are invertible
+define i8 @t4(i1 %i0, i8 %v0, i8 %v1, i8 %v2, i8 %v3, i8 %v4, i8 %v5) {
+; CHECK-LABEL: @t4(
+; CHECK-NEXT:    [[I1:%.*]] = icmp eq i8 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I2:%.*]] = select i1 [[I1]], i8 [[V2:%.*]], i8 [[V3:%.*]]
+; CHECK-NEXT:    call void @use8(i8 [[I2]])
 ; CHECK-NEXT:    [[I3:%.*]] = xor i1 [[I0:%.*]], true
 ; CHECK-NEXT:    [[I4:%.*]] = and i1 [[I1]], [[I3]]
-; CHECK-NEXT:    [[I5:%.*]] = select i1 [[I4]], i32 [[V4:%.*]], i32 [[V5:%.*]]
-; CHECK-NEXT:    ret i32 [[I5]]
+; CHECK-NEXT:    [[I5:%.*]] = select i1 [[I4]], i8 [[V4:%.*]], i8 [[V5:%.*]]
+; CHECK-NEXT:    ret i8 [[I5]]
 ;
-  %i1 = icmp eq i32 %v0, %v1 ; has extra invertible use
-  %i2 = select i1 %i1, i32 %v2, i32 %v3 ; invertible use
-  store i32 %i2, ptr %dst
+  %i1 = icmp eq i8 %v0, %v1 ; has extra invertible use
+  %i2 = select i1 %i1, i8 %v2, i8 %v3 ; invertible use
+  call void @use8(i8 %i2)
   %i3 = xor i1 %i0, -1
   %i4 = and i1 %i3, %i1
-  %i5 = select i1 %i4, i32 %v4, i32 %v5
-  ret i32 %i5
+  %i5 = select i1 %i4, i8 %v4, i8 %v5
+  ret i8 %i5
+}
+define i8 @t4_commutative(i1 %i0, i8 %v0, i8 %v1, i8 %v2, i8 %v3, i8 %v4, i8 %v5) {
+; CHECK-LABEL: @t4_commutative(
+; CHECK-NEXT:    [[I1:%.*]] = icmp eq i8 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I2:%.*]] = select i1 [[I1]], i8 [[V2:%.*]], i8 [[V3:%.*]]
+; CHECK-NEXT:    call void @use8(i8 [[I2]])
+; CHECK-NEXT:    [[I3:%.*]] = xor i1 [[I0:%.*]], true
+; CHECK-NEXT:    [[I4:%.*]] = and i1 [[I1]], [[I3]]
+; CHECK-NEXT:    [[I5:%.*]] = select i1 [[I4]], i8 [[V4:%.*]], i8 [[V5:%.*]]
+; CHECK-NEXT:    ret i8 [[I5]]
+;
+  %i1 = icmp eq i8 %v0, %v1 ; has extra invertible use
+  %i2 = select i1 %i1, i8 %v2, i8 %v3 ; invertible use
+  call void @use8(i8 %i2)
+  %i3 = xor i1 %i0, -1
+  %i4 = and i1 %i1, %i3
+  %i5 = select i1 %i4, i8 %v4, i8 %v5
+  ret i8 %i5
 }

diff --git a/llvm/test/Transforms/InstCombine/sink-not-into-another-hand-of-logical-and.ll b/llvm/test/Transforms/InstCombine/sink-not-into-another-hand-of-logical-and.ll
index c858f66790be..9fea168e3b8a 100644
--- a/llvm/test/Transforms/InstCombine/sink-not-into-another-hand-of-logical-and.ll
+++ b/llvm/test/Transforms/InstCombine/sink-not-into-another-hand-of-logical-and.ll
@@ -8,117 +8,136 @@
 ; iff y is free to invert and all uses of z can be freely updated.
 
 declare void @use1(i1)
+declare void @use8(i8)
 
 ; Most basic positive test
-define i32 @t0(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3) {
+define i8 @t0(i1 %i0, i8 %v0, i8 %v1, i8 %v2, i8 %v3) {
 ; CHECK-LABEL: @t0(
-; CHECK-NEXT:    [[I1:%.*]] = icmp ne i32 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I1:%.*]] = icmp ne i8 [[V0:%.*]], [[V1:%.*]]
 ; CHECK-NEXT:    [[I3_NOT:%.*]] = select i1 [[I0:%.*]], i1 true, i1 [[I1]]
-; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3_NOT]], i32 [[V3:%.*]], i32 [[V2:%.*]]
-; CHECK-NEXT:    ret i32 [[I4]]
+; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3_NOT]], i8 [[V3:%.*]], i8 [[V2:%.*]]
+; CHECK-NEXT:    ret i8 [[I4]]
 ;
-  %i1 = icmp eq i32 %v0, %v1
+  %i1 = icmp eq i8 %v0, %v1
   %i2 = xor i1 %i0, -1
   %i3 = select i1 %i2, i1 %i1, i1 false
-  %i4 = select i1 %i3, i32 %v2, i32 %v3
-  ret i32 %i4
+  %i4 = select i1 %i3, i8 %v2, i8 %v3
+  ret i8 %i4
 }
-define i32 @t0_commutative(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3) {
+define i8 @t0_commutative(i1 %i0, i8 %v0, i8 %v1, i8 %v2, i8 %v3) {
 ; CHECK-LABEL: @t0_commutative(
-; CHECK-NEXT:    [[I1:%.*]] = icmp ne i32 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I1:%.*]] = icmp ne i8 [[V0:%.*]], [[V1:%.*]]
 ; CHECK-NEXT:    [[I3_NOT:%.*]] = select i1 [[I1]], i1 true, i1 [[I0:%.*]]
-; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3_NOT]], i32 [[V3:%.*]], i32 [[V2:%.*]]
-; CHECK-NEXT:    ret i32 [[I4]]
+; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3_NOT]], i8 [[V3:%.*]], i8 [[V2:%.*]]
+; CHECK-NEXT:    ret i8 [[I4]]
 ;
-  %i1 = icmp eq i32 %v0, %v1
+  %i1 = icmp eq i8 %v0, %v1
   %i2 = xor i1 %i0, -1
   %i3 = select i1 %i1, i1 %i2, i1 false
-  %i4 = select i1 %i3, i32 %v2, i32 %v3
-  ret i32 %i4
+  %i4 = select i1 %i3, i8 %v2, i8 %v3
+  ret i8 %i4
 }
-define i32 @t1(i32 %v0, i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5) {
+define i8 @t1(i8 %v0, i8 %v1, i8 %v2, i8 %v3, i8 %v4, i8 %v5) {
 ; CHECK-LABEL: @t1(
-; CHECK-NEXT:    [[I0:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
-; CHECK-NEXT:    [[I1:%.*]] = icmp ne i32 [[V2:%.*]], [[V3:%.*]]
+; CHECK-NEXT:    [[I0:%.*]] = icmp eq i8 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I1:%.*]] = icmp ne i8 [[V2:%.*]], [[V3:%.*]]
 ; CHECK-NEXT:    call void @use1(i1 [[I0]])
 ; CHECK-NEXT:    [[I3_NOT:%.*]] = select i1 [[I0]], i1 true, i1 [[I1]]
-; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3_NOT]], i32 [[V5:%.*]], i32 [[V4:%.*]]
-; CHECK-NEXT:    ret i32 [[I4]]
+; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3_NOT]], i8 [[V5:%.*]], i8 [[V4:%.*]]
+; CHECK-NEXT:    ret i8 [[I4]]
 ;
-  %i0 = icmp eq i32 %v0, %v1
-  %i1 = icmp eq i32 %v2, %v3
+  %i0 = icmp eq i8 %v0, %v1
+  %i1 = icmp eq i8 %v2, %v3
   call void @use1(i1 %i0)
   %i2 = xor i1 %i0, -1
   %i3 = select i1 %i2, i1 %i1, i1 false
-  %i4 = select i1 %i3, i32 %v4, i32 %v5
-  ret i32 %i4
+  %i4 = select i1 %i3, i8 %v4, i8 %v5
+  ret i8 %i4
 }
-define i32 @t1_commutative(i32 %v0, i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5) {
+define i8 @t1_commutative(i8 %v0, i8 %v1, i8 %v2, i8 %v3, i8 %v4, i8 %v5) {
 ; CHECK-LABEL: @t1_commutative(
-; CHECK-NEXT:    [[I0:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
-; CHECK-NEXT:    [[I1:%.*]] = icmp ne i32 [[V2:%.*]], [[V3:%.*]]
+; CHECK-NEXT:    [[I0:%.*]] = icmp eq i8 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I1:%.*]] = icmp ne i8 [[V2:%.*]], [[V3:%.*]]
 ; CHECK-NEXT:    call void @use1(i1 [[I0]])
 ; CHECK-NEXT:    [[I3_NOT:%.*]] = select i1 [[I1]], i1 true, i1 [[I0]]
-; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3_NOT]], i32 [[V5:%.*]], i32 [[V4:%.*]]
-; CHECK-NEXT:    ret i32 [[I4]]
+; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3_NOT]], i8 [[V5:%.*]], i8 [[V4:%.*]]
+; CHECK-NEXT:    ret i8 [[I4]]
 ;
-  %i0 = icmp eq i32 %v0, %v1
-  %i1 = icmp eq i32 %v2, %v3
+  %i0 = icmp eq i8 %v0, %v1
+  %i1 = icmp eq i8 %v2, %v3
   call void @use1(i1 %i0)
   %i2 = xor i1 %i0, -1
   %i3 = select i1 %i1, i1 %i2, i1 false
-  %i4 = select i1 %i3, i32 %v4, i32 %v5
-  ret i32 %i4
+  %i4 = select i1 %i3, i8 %v4, i8 %v5
+  ret i8 %i4
 }
 
 ; All users of %i3 must be invertible
-define i1 @n2(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3) {
+define i1 @n2(i1 %i0, i8 %v0, i8 %v1, i8 %v2, i8 %v3) {
 ; CHECK-LABEL: @n2(
-; CHECK-NEXT:    [[I1:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I1:%.*]] = icmp eq i8 [[V0:%.*]], [[V1:%.*]]
 ; CHECK-NEXT:    [[I2:%.*]] = xor i1 [[I0:%.*]], true
 ; CHECK-NEXT:    [[I3:%.*]] = select i1 [[I2]], i1 [[I1]], i1 false
 ; CHECK-NEXT:    ret i1 [[I3]]
 ;
-  %i1 = icmp eq i32 %v0, %v1
+  %i1 = icmp eq i8 %v0, %v1
   %i2 = xor i1 %i0, -1
   %i3 = select i1 %i2, i1 %i1, i1 false
   ret i1 %i3 ; can not be inverted
 }
 
 ; %i1 must be invertible
-define i32 @n3(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3) {
+define i8 @n3(i1 %i0, i8 %v0, i8 %v1, i8 %v2, i8 %v3) {
 ; CHECK-LABEL: @n3(
-; CHECK-NEXT:    [[I1:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I1:%.*]] = icmp eq i8 [[V0:%.*]], [[V1:%.*]]
 ; CHECK-NEXT:    call void @use1(i1 [[I1]])
 ; CHECK-NEXT:    [[I2:%.*]] = xor i1 [[I0:%.*]], true
 ; CHECK-NEXT:    [[I3:%.*]] = select i1 [[I2]], i1 [[I1]], i1 false
-; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3]], i32 [[V2:%.*]], i32 [[V3:%.*]]
-; CHECK-NEXT:    ret i32 [[I4]]
+; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3]], i8 [[V2:%.*]], i8 [[V3:%.*]]
+; CHECK-NEXT:    ret i8 [[I4]]
 ;
-  %i1 = icmp eq i32 %v0, %v1 ; has extra uninvertible use
+  %i1 = icmp eq i8 %v0, %v1 ; has extra uninvertible use
   call void @use1(i1 %i1) ; bad extra use
   %i2 = xor i1 %i0, -1
   %i3 = select i1 %i2, i1 %i1, i1 false
-  %i4 = select i1 %i3, i32 %v2, i32 %v3
-  ret i32 %i4
+  %i4 = select i1 %i3, i8 %v2, i8 %v3
+  ret i8 %i4
 }
 
-; FIXME: we could invert all uses of %i1 here
-define i32 @n4(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5, ptr %dst) {
-; CHECK-LABEL: @n4(
-; CHECK-NEXT:    [[I1:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
-; CHECK-NEXT:    [[I2:%.*]] = select i1 [[I1]], i32 [[V2:%.*]], i32 [[V3:%.*]]
-; CHECK-NEXT:    store i32 [[I2]], ptr [[DST:%.*]], align 4
+; Extra uses are invertible
+define i8 @t4(i1 %i0, i8 %v0, i8 %v1, i8 %v2, i8 %v3, i8 %v4, i8 %v5) {
+; CHECK-LABEL: @t4(
+; CHECK-NEXT:    [[I1:%.*]] = icmp eq i8 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I2:%.*]] = select i1 [[I1]], i8 [[V2:%.*]], i8 [[V3:%.*]]
+; CHECK-NEXT:    call void @use8(i8 [[I2]])
 ; CHECK-NEXT:    [[I3:%.*]] = xor i1 [[I0:%.*]], true
-; CHECK-NEXT:    [[I4:%.*]] = and i1 [[I1]], [[I3]]
-; CHECK-NEXT:    [[I5:%.*]] = select i1 [[I4]], i32 [[V4:%.*]], i32 [[V5:%.*]]
-; CHECK-NEXT:    ret i32 [[I5]]
+; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3]], i1 [[I1]], i1 false
+; CHECK-NEXT:    [[I5:%.*]] = select i1 [[I4]], i8 [[V4:%.*]], i8 [[V5:%.*]]
+; CHECK-NEXT:    ret i8 [[I5]]
 ;
-  %i1 = icmp eq i32 %v0, %v1 ; has extra invertible use
-  %i2 = select i1 %i1, i32 %v2, i32 %v3 ; invertible use
-  store i32 %i2, ptr %dst
+  %i1 = icmp eq i8 %v0, %v1 ; has extra invertible use
+  %i2 = select i1 %i1, i8 %v2, i8 %v3 ; invertible use
+  call void @use8(i8 %i2)
   %i3 = xor i1 %i0, -1
-  %i4 = and i1 %i3, %i1
-  %i5 = select i1 %i4, i32 %v4, i32 %v5
-  ret i32 %i5
+  %i4 = select i1 %i3, i1 %i1, i1 false
+  %i5 = select i1 %i4, i8 %v4, i8 %v5
+  ret i8 %i5
+}
+define i8 @t4_commutative(i1 %i0, i8 %v0, i8 %v1, i8 %v2, i8 %v3, i8 %v4, i8 %v5) {
+; CHECK-LABEL: @t4_commutative(
+; CHECK-NEXT:    [[I1:%.*]] = icmp eq i8 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I2:%.*]] = select i1 [[I1]], i8 [[V2:%.*]], i8 [[V3:%.*]]
+; CHECK-NEXT:    call void @use8(i8 [[I2]])
+; CHECK-NEXT:    [[I3:%.*]] = xor i1 [[I0:%.*]], true
+; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I1]], i1 [[I3]], i1 false
+; CHECK-NEXT:    [[I5:%.*]] = select i1 [[I4]], i8 [[V4:%.*]], i8 [[V5:%.*]]
+; CHECK-NEXT:    ret i8 [[I5]]
+;
+  %i1 = icmp eq i8 %v0, %v1 ; has extra invertible use
+  %i2 = select i1 %i1, i8 %v2, i8 %v3 ; invertible use
+  call void @use8(i8 %i2)
+  %i3 = xor i1 %i0, -1
+  %i4 = select i1 %i1, i1 %i3, i1 false
+  %i5 = select i1 %i4, i8 %v4, i8 %v5
+  ret i8 %i5
 }

diff --git a/llvm/test/Transforms/InstCombine/sink-not-into-another-hand-of-logical-or.ll b/llvm/test/Transforms/InstCombine/sink-not-into-another-hand-of-logical-or.ll
index f09d4587a32c..d49b13aad7d7 100644
--- a/llvm/test/Transforms/InstCombine/sink-not-into-another-hand-of-logical-or.ll
+++ b/llvm/test/Transforms/InstCombine/sink-not-into-another-hand-of-logical-or.ll
@@ -8,117 +8,136 @@
 ; iff y is free to invert and all uses of z can be freely updated.
 
 declare void @use1(i1)
+declare void @use8(i8)
 
 ; Most basic positive test
-define i32 @t0(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3) {
+define i8 @t0(i1 %i0, i8 %v0, i8 %v1, i8 %v2, i8 %v3) {
 ; CHECK-LABEL: @t0(
-; CHECK-NEXT:    [[I1:%.*]] = icmp ne i32 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I1:%.*]] = icmp ne i8 [[V0:%.*]], [[V1:%.*]]
 ; CHECK-NEXT:    [[I3_NOT:%.*]] = select i1 [[I0:%.*]], i1 [[I1]], i1 false
-; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3_NOT]], i32 [[V3:%.*]], i32 [[V2:%.*]]
-; CHECK-NEXT:    ret i32 [[I4]]
+; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3_NOT]], i8 [[V3:%.*]], i8 [[V2:%.*]]
+; CHECK-NEXT:    ret i8 [[I4]]
 ;
-  %i1 = icmp eq i32 %v0, %v1
+  %i1 = icmp eq i8 %v0, %v1
   %i2 = xor i1 %i0, -1
   %i3 = select i1 %i2, i1 true, i1 %i1
-  %i4 = select i1 %i3, i32 %v2, i32 %v3
-  ret i32 %i4
+  %i4 = select i1 %i3, i8 %v2, i8 %v3
+  ret i8 %i4
 }
-define i32 @t0_commutative(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3) {
+define i8 @t0_commutative(i1 %i0, i8 %v0, i8 %v1, i8 %v2, i8 %v3) {
 ; CHECK-LABEL: @t0_commutative(
-; CHECK-NEXT:    [[I1:%.*]] = icmp ne i32 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I1:%.*]] = icmp ne i8 [[V0:%.*]], [[V1:%.*]]
 ; CHECK-NEXT:    [[I3_NOT:%.*]] = select i1 [[I1]], i1 [[I0:%.*]], i1 false
-; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3_NOT]], i32 [[V3:%.*]], i32 [[V2:%.*]]
-; CHECK-NEXT:    ret i32 [[I4]]
+; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3_NOT]], i8 [[V3:%.*]], i8 [[V2:%.*]]
+; CHECK-NEXT:    ret i8 [[I4]]
 ;
-  %i1 = icmp eq i32 %v0, %v1
+  %i1 = icmp eq i8 %v0, %v1
   %i2 = xor i1 %i0, -1
   %i3 = select i1 %i1, i1 true, i1 %i2
-  %i4 = select i1 %i3, i32 %v2, i32 %v3
-  ret i32 %i4
+  %i4 = select i1 %i3, i8 %v2, i8 %v3
+  ret i8 %i4
 }
-define i32 @t1(i32 %v0, i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5) {
+define i8 @t1(i8 %v0, i8 %v1, i8 %v2, i8 %v3, i8 %v4, i8 %v5) {
 ; CHECK-LABEL: @t1(
-; CHECK-NEXT:    [[I0:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
-; CHECK-NEXT:    [[I1:%.*]] = icmp ne i32 [[V2:%.*]], [[V3:%.*]]
+; CHECK-NEXT:    [[I0:%.*]] = icmp eq i8 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I1:%.*]] = icmp ne i8 [[V2:%.*]], [[V3:%.*]]
 ; CHECK-NEXT:    call void @use1(i1 [[I0]])
 ; CHECK-NEXT:    [[I3_NOT:%.*]] = select i1 [[I0]], i1 [[I1]], i1 false
-; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3_NOT]], i32 [[V5:%.*]], i32 [[V4:%.*]]
-; CHECK-NEXT:    ret i32 [[I4]]
+; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3_NOT]], i8 [[V5:%.*]], i8 [[V4:%.*]]
+; CHECK-NEXT:    ret i8 [[I4]]
 ;
-  %i0 = icmp eq i32 %v0, %v1
-  %i1 = icmp eq i32 %v2, %v3
+  %i0 = icmp eq i8 %v0, %v1
+  %i1 = icmp eq i8 %v2, %v3
   call void @use1(i1 %i0)
   %i2 = xor i1 %i0, -1
   %i3 = select i1 %i2, i1 true, i1 %i1
-  %i4 = select i1 %i3, i32 %v4, i32 %v5
-  ret i32 %i4
+  %i4 = select i1 %i3, i8 %v4, i8 %v5
+  ret i8 %i4
 }
-define i32 @t1_commutative(i32 %v0, i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5) {
+define i8 @t1_commutative(i8 %v0, i8 %v1, i8 %v2, i8 %v3, i8 %v4, i8 %v5) {
 ; CHECK-LABEL: @t1_commutative(
-; CHECK-NEXT:    [[I0:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
-; CHECK-NEXT:    [[I1:%.*]] = icmp ne i32 [[V2:%.*]], [[V3:%.*]]
+; CHECK-NEXT:    [[I0:%.*]] = icmp eq i8 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I1:%.*]] = icmp ne i8 [[V2:%.*]], [[V3:%.*]]
 ; CHECK-NEXT:    call void @use1(i1 [[I0]])
 ; CHECK-NEXT:    [[I3_NOT:%.*]] = select i1 [[I1]], i1 [[I0]], i1 false
-; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3_NOT]], i32 [[V5:%.*]], i32 [[V4:%.*]]
-; CHECK-NEXT:    ret i32 [[I4]]
+; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3_NOT]], i8 [[V5:%.*]], i8 [[V4:%.*]]
+; CHECK-NEXT:    ret i8 [[I4]]
 ;
-  %i0 = icmp eq i32 %v0, %v1
-  %i1 = icmp eq i32 %v2, %v3
+  %i0 = icmp eq i8 %v0, %v1
+  %i1 = icmp eq i8 %v2, %v3
   call void @use1(i1 %i0)
   %i2 = xor i1 %i0, -1
   %i3 = select i1 %i1, i1 true, i1 %i2
-  %i4 = select i1 %i3, i32 %v4, i32 %v5
-  ret i32 %i4
+  %i4 = select i1 %i3, i8 %v4, i8 %v5
+  ret i8 %i4
 }
 
 ; All users of %i3 must be invertible
-define i1 @n2(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3) {
+define i1 @n2(i1 %i0, i8 %v0, i8 %v1, i8 %v2, i8 %v3) {
 ; CHECK-LABEL: @n2(
-; CHECK-NEXT:    [[I1:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I1:%.*]] = icmp eq i8 [[V0:%.*]], [[V1:%.*]]
 ; CHECK-NEXT:    [[I2:%.*]] = xor i1 [[I0:%.*]], true
 ; CHECK-NEXT:    [[I3:%.*]] = select i1 [[I2]], i1 true, i1 [[I1]]
 ; CHECK-NEXT:    ret i1 [[I3]]
 ;
-  %i1 = icmp eq i32 %v0, %v1
+  %i1 = icmp eq i8 %v0, %v1
   %i2 = xor i1 %i0, -1
   %i3 = select i1 %i2, i1 true, i1 %i1
   ret i1 %i3 ; can not be inverted
 }
 
 ; %i1 must be invertible
-define i32 @n3(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3) {
+define i8 @n3(i1 %i0, i8 %v0, i8 %v1, i8 %v2, i8 %v3) {
 ; CHECK-LABEL: @n3(
-; CHECK-NEXT:    [[I1:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I1:%.*]] = icmp eq i8 [[V0:%.*]], [[V1:%.*]]
 ; CHECK-NEXT:    call void @use1(i1 [[I1]])
 ; CHECK-NEXT:    [[I2:%.*]] = xor i1 [[I0:%.*]], true
 ; CHECK-NEXT:    [[I3:%.*]] = select i1 [[I2]], i1 true, i1 [[I1]]
-; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3]], i32 [[V2:%.*]], i32 [[V3:%.*]]
-; CHECK-NEXT:    ret i32 [[I4]]
+; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3]], i8 [[V2:%.*]], i8 [[V3:%.*]]
+; CHECK-NEXT:    ret i8 [[I4]]
 ;
-  %i1 = icmp eq i32 %v0, %v1 ; has extra uninvertible use
+  %i1 = icmp eq i8 %v0, %v1 ; has extra uninvertible use
   call void @use1(i1 %i1) ; bad extra use
   %i2 = xor i1 %i0, -1
   %i3 = select i1 %i2, i1 true, i1 %i1
-  %i4 = select i1 %i3, i32 %v2, i32 %v3
-  ret i32 %i4
+  %i4 = select i1 %i3, i8 %v2, i8 %v3
+  ret i8 %i4
 }
 
-; FIXME: we could invert all uses of %i1 here
-define i32 @n4(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5, ptr %dst) {
-; CHECK-LABEL: @n4(
-; CHECK-NEXT:    [[I1:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
-; CHECK-NEXT:    [[I2:%.*]] = select i1 [[I1]], i32 [[V2:%.*]], i32 [[V3:%.*]]
-; CHECK-NEXT:    store i32 [[I2]], ptr [[DST:%.*]], align 4
+; Extra uses are invertible
+define i8 @t4(i1 %i0, i8 %v0, i8 %v1, i8 %v2, i8 %v3, i8 %v4, i8 %v5) {
+; CHECK-LABEL: @t4(
+; CHECK-NEXT:    [[I1:%.*]] = icmp eq i8 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I2:%.*]] = select i1 [[I1]], i8 [[V2:%.*]], i8 [[V3:%.*]]
+; CHECK-NEXT:    call void @use8(i8 [[I2]])
 ; CHECK-NEXT:    [[I3:%.*]] = xor i1 [[I0:%.*]], true
-; CHECK-NEXT:    [[I4:%.*]] = or i1 [[I1]], [[I3]]
-; CHECK-NEXT:    [[I5:%.*]] = select i1 [[I4]], i32 [[V4:%.*]], i32 [[V5:%.*]]
-; CHECK-NEXT:    ret i32 [[I5]]
+; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3]], i1 true, i1 [[I1]]
+; CHECK-NEXT:    [[I5:%.*]] = select i1 [[I4]], i8 [[V4:%.*]], i8 [[V5:%.*]]
+; CHECK-NEXT:    ret i8 [[I5]]
 ;
-  %i1 = icmp eq i32 %v0, %v1 ; has extra invertible use
-  %i2 = select i1 %i1, i32 %v2, i32 %v3 ; invertible use
-  store i32 %i2, ptr %dst
+  %i1 = icmp eq i8 %v0, %v1 ; has extra invertible use
+  %i2 = select i1 %i1, i8 %v2, i8 %v3 ; invertible use
+  call void @use8(i8 %i2)
   %i3 = xor i1 %i0, -1
-  %i4 = or i1 %i3, %i1
-  %i5 = select i1 %i4, i32 %v4, i32 %v5
-  ret i32 %i5
+  %i4 = select i1 %i3, i1 true, i1 %i1
+  %i5 = select i1 %i4, i8 %v4, i8 %v5
+  ret i8 %i5
+}
+define i8 @t4_commutative(i1 %i0, i8 %v0, i8 %v1, i8 %v2, i8 %v3, i8 %v4, i8 %v5) {
+; CHECK-LABEL: @t4_commutative(
+; CHECK-NEXT:    [[I1:%.*]] = icmp eq i8 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I2:%.*]] = select i1 [[I1]], i8 [[V2:%.*]], i8 [[V3:%.*]]
+; CHECK-NEXT:    call void @use8(i8 [[I2]])
+; CHECK-NEXT:    [[I3:%.*]] = xor i1 [[I0:%.*]], true
+; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I1]], i1 true, i1 [[I3]]
+; CHECK-NEXT:    [[I5:%.*]] = select i1 [[I4]], i8 [[V4:%.*]], i8 [[V5:%.*]]
+; CHECK-NEXT:    ret i8 [[I5]]
+;
+  %i1 = icmp eq i8 %v0, %v1 ; has extra invertible use
+  %i2 = select i1 %i1, i8 %v2, i8 %v3 ; invertible use
+  call void @use8(i8 %i2)
+  %i3 = xor i1 %i0, -1
+  %i4 = select i1 %i1, i1 true, i1 %i3
+  %i5 = select i1 %i4, i8 %v4, i8 %v5
+  ret i8 %i5
 }

diff --git a/llvm/test/Transforms/InstCombine/sink-not-into-another-hand-of-or.ll b/llvm/test/Transforms/InstCombine/sink-not-into-another-hand-of-or.ll
index c5c8fec7fa72..ce7f4b545a20 100644
--- a/llvm/test/Transforms/InstCombine/sink-not-into-another-hand-of-or.ll
+++ b/llvm/test/Transforms/InstCombine/sink-not-into-another-hand-of-or.ll
@@ -8,87 +8,106 @@
 ; iff y is free to invert and all uses of z can be freely updated.
 
 declare void @use1(i1)
+declare void @use8(i8)
 
 ; Most basic positive test
-define i32 @t0(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3) {
+define i8 @t0(i1 %i0, i8 %v0, i8 %v1, i8 %v2, i8 %v3) {
 ; CHECK-LABEL: @t0(
-; CHECK-NEXT:    [[I1:%.*]] = icmp ne i32 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I1:%.*]] = icmp ne i8 [[V0:%.*]], [[V1:%.*]]
 ; CHECK-NEXT:    [[I3_NOT:%.*]] = and i1 [[I1]], [[I0:%.*]]
-; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3_NOT]], i32 [[V3:%.*]], i32 [[V2:%.*]]
-; CHECK-NEXT:    ret i32 [[I4]]
+; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3_NOT]], i8 [[V3:%.*]], i8 [[V2:%.*]]
+; CHECK-NEXT:    ret i8 [[I4]]
 ;
-  %i1 = icmp eq i32 %v0, %v1
+  %i1 = icmp eq i8 %v0, %v1
   %i2 = xor i1 %i0, -1
   %i3 = or i1 %i2, %i1
-  %i4 = select i1 %i3, i32 %v2, i32 %v3
-  ret i32 %i4
+  %i4 = select i1 %i3, i8 %v2, i8 %v3
+  ret i8 %i4
 }
-define i32 @t1(i32 %v0, i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5) {
+define i8 @t1(i8 %v0, i8 %v1, i8 %v2, i8 %v3, i8 %v4, i8 %v5) {
 ; CHECK-LABEL: @t1(
-; CHECK-NEXT:    [[I0:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
-; CHECK-NEXT:    [[I1:%.*]] = icmp ne i32 [[V2:%.*]], [[V3:%.*]]
+; CHECK-NEXT:    [[I0:%.*]] = icmp eq i8 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I1:%.*]] = icmp ne i8 [[V2:%.*]], [[V3:%.*]]
 ; CHECK-NEXT:    call void @use1(i1 [[I0]])
 ; CHECK-NEXT:    [[I3_NOT:%.*]] = and i1 [[I1]], [[I0]]
-; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3_NOT]], i32 [[V5:%.*]], i32 [[V4:%.*]]
-; CHECK-NEXT:    ret i32 [[I4]]
+; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3_NOT]], i8 [[V5:%.*]], i8 [[V4:%.*]]
+; CHECK-NEXT:    ret i8 [[I4]]
 ;
-  %i0 = icmp eq i32 %v0, %v1
-  %i1 = icmp eq i32 %v2, %v3
+  %i0 = icmp eq i8 %v0, %v1
+  %i1 = icmp eq i8 %v2, %v3
   call void @use1(i1 %i0)
   %i2 = xor i1 %i0, -1
   %i3 = or i1 %i2, %i1
-  %i4 = select i1 %i3, i32 %v4, i32 %v5
-  ret i32 %i4
+  %i4 = select i1 %i3, i8 %v4, i8 %v5
+  ret i8 %i4
 }
 
 ; All users of %i3 must be invertible
-define i1 @n2(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3) {
+define i1 @n2(i1 %i0, i8 %v0, i8 %v1, i8 %v2, i8 %v3) {
 ; CHECK-LABEL: @n2(
-; CHECK-NEXT:    [[I1:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I1:%.*]] = icmp eq i8 [[V0:%.*]], [[V1:%.*]]
 ; CHECK-NEXT:    [[I2:%.*]] = xor i1 [[I0:%.*]], true
 ; CHECK-NEXT:    [[I3:%.*]] = or i1 [[I1]], [[I2]]
 ; CHECK-NEXT:    ret i1 [[I3]]
 ;
-  %i1 = icmp eq i32 %v0, %v1
+  %i1 = icmp eq i8 %v0, %v1
   %i2 = xor i1 %i0, -1
   %i3 = or i1 %i2, %i1
   ret i1 %i3 ; can not be inverted
 }
 
 ; %i1 must be invertible
-define i32 @n3(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3) {
+define i8 @n3(i1 %i0, i8 %v0, i8 %v1, i8 %v2, i8 %v3) {
 ; CHECK-LABEL: @n3(
-; CHECK-NEXT:    [[I1:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I1:%.*]] = icmp eq i8 [[V0:%.*]], [[V1:%.*]]
 ; CHECK-NEXT:    call void @use1(i1 [[I1]])
 ; CHECK-NEXT:    [[I2:%.*]] = xor i1 [[I0:%.*]], true
 ; CHECK-NEXT:    [[I3:%.*]] = or i1 [[I1]], [[I2]]
-; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3]], i32 [[V2:%.*]], i32 [[V3:%.*]]
-; CHECK-NEXT:    ret i32 [[I4]]
+; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3]], i8 [[V2:%.*]], i8 [[V3:%.*]]
+; CHECK-NEXT:    ret i8 [[I4]]
 ;
-  %i1 = icmp eq i32 %v0, %v1 ; has extra uninvertible use
+  %i1 = icmp eq i8 %v0, %v1 ; has extra uninvertible use
   call void @use1(i1 %i1) ; bad extra use
   %i2 = xor i1 %i0, -1
   %i3 = or i1 %i2, %i1
-  %i4 = select i1 %i3, i32 %v2, i32 %v3
-  ret i32 %i4
+  %i4 = select i1 %i3, i8 %v2, i8 %v3
+  ret i8 %i4
 }
 
-; FIXME: we could invert all uses of %i1 here
-define i32 @n4(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5, ptr %dst) {
-; CHECK-LABEL: @n4(
-; CHECK-NEXT:    [[I1:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
-; CHECK-NEXT:    [[I2:%.*]] = select i1 [[I1]], i32 [[V2:%.*]], i32 [[V3:%.*]]
-; CHECK-NEXT:    store i32 [[I2]], ptr [[DST:%.*]], align 4
+; Extra uses are invertible
+define i8 @t4(i1 %i0, i8 %v0, i8 %v1, i8 %v2, i8 %v3, i8 %v4, i8 %v5) {
+; CHECK-LABEL: @t4(
+; CHECK-NEXT:    [[I1:%.*]] = icmp eq i8 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I2:%.*]] = select i1 [[I1]], i8 [[V2:%.*]], i8 [[V3:%.*]]
+; CHECK-NEXT:    call void @use8(i8 [[I2]])
 ; CHECK-NEXT:    [[I3:%.*]] = xor i1 [[I0:%.*]], true
 ; CHECK-NEXT:    [[I4:%.*]] = or i1 [[I1]], [[I3]]
-; CHECK-NEXT:    [[I5:%.*]] = select i1 [[I4]], i32 [[V4:%.*]], i32 [[V5:%.*]]
-; CHECK-NEXT:    ret i32 [[I5]]
+; CHECK-NEXT:    [[I5:%.*]] = select i1 [[I4]], i8 [[V4:%.*]], i8 [[V5:%.*]]
+; CHECK-NEXT:    ret i8 [[I5]]
 ;
-  %i1 = icmp eq i32 %v0, %v1 ; has extra invertible use
-  %i2 = select i1 %i1, i32 %v2, i32 %v3 ; invertible use
-  store i32 %i2, ptr %dst
+  %i1 = icmp eq i8 %v0, %v1 ; has extra invertible use
+  %i2 = select i1 %i1, i8 %v2, i8 %v3 ; invertible use
+  call void @use8(i8 %i2)
   %i3 = xor i1 %i0, -1
   %i4 = or i1 %i3, %i1
-  %i5 = select i1 %i4, i32 %v4, i32 %v5
-  ret i32 %i5
+  %i5 = select i1 %i4, i8 %v4, i8 %v5
+  ret i8 %i5
+}
+define i8 @t4_commutative(i1 %i0, i8 %v0, i8 %v1, i8 %v2, i8 %v3, i8 %v4, i8 %v5) {
+; CHECK-LABEL: @t4_commutative(
+; CHECK-NEXT:    [[I1:%.*]] = icmp eq i8 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[I2:%.*]] = select i1 [[I1]], i8 [[V2:%.*]], i8 [[V3:%.*]]
+; CHECK-NEXT:    call void @use8(i8 [[I2]])
+; CHECK-NEXT:    [[I3:%.*]] = xor i1 [[I0:%.*]], true
+; CHECK-NEXT:    [[I4:%.*]] = or i1 [[I1]], [[I3]]
+; CHECK-NEXT:    [[I5:%.*]] = select i1 [[I4]], i8 [[V4:%.*]], i8 [[V5:%.*]]
+; CHECK-NEXT:    ret i8 [[I5]]
+;
+  %i1 = icmp eq i8 %v0, %v1 ; has extra invertible use
+  %i2 = select i1 %i1, i8 %v2, i8 %v3 ; invertible use
+  call void @use8(i8 %i2)
+  %i3 = xor i1 %i0, -1
+  %i4 = or i1 %i1, %i3
+  %i5 = select i1 %i4, i8 %v4, i8 %v5
+  ret i8 %i5
 }


        

