[llvm] r372170 - [NFC][InstCombine] More tests for "Dropping pointless masking before left shift" (PR42563)

Roman Lebedev via llvm-commits <llvm-commits at lists.llvm.org>
Tue Sep 17 12:32:11 PDT 2019


Author: lebedevri
Date: Tue Sep 17 12:32:11 2019
New Revision: 372170

URL: http://llvm.org/viewvc/llvm-project?rev=372170&view=rev
Log:
[NFC][InstCombine] More tests for "Dropping pointless masking before left shift" (PR42563)

While we already fold that pattern when the sum of shift amounts is not
smaller than the bitwidth, there is a painfully obvious generalization:
  https://rise4fun.com/Alive/F5R
That is, the sum of shift amounts tells us how many low bits can remain
set in the output. If that sum is less than the bitwidth, we simply need
to apply a mask, and that mask is a constant.
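
As a sanity check on that identity, here is a minimal standalone C++ sketch
(not part of this commit) that brute-forces the equivalence at a small
bitwidth; the constant W and the loop structure are choices made here for
illustration. Variants (a) and (b) build the same mask value, since
(1 << maskNbits) - 1 == ~(-1 << maskNbits), so one check covers both:

  #include <cassert>
  #include <cstdint>

  int main() {
    const unsigned W = 8; // brute-force at bitwidth 8 rather than 32, for speed
    for (unsigned maskNbits = 0; maskNbits < W; ++maskNbits) {
      for (unsigned shiftNbits = 0; shiftNbits < W; ++shiftNbits) {
        // sum >= bitwidth is the already-folded case; the new tests target
        // sum < bitwidth, where the mask becomes a nontrivial constant.
        if (maskNbits + shiftNbits >= W)
          continue;
        for (uint32_t x = 0; x < (1u << W); ++x) {
          // Original pattern: mask off the high bits, then shift left.
          uint32_t lhs = ((x & ((1u << maskNbits) - 1)) << shiftNbits) & 0xFFu;
          // Generalized fold: shift first, then apply the constant mask
          // ~(-1 << (maskNbits + shiftNbits)).
          uint32_t rhs =
              ((x << shiftNbits) & ~(~0u << (maskNbits + shiftNbits))) & 0xFFu;
          assert(lhs == rhs);
        }
      }
    }
  }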

Added:
    llvm/trunk/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-a.ll
    llvm/trunk/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-b.ll
Modified:
    llvm/trunk/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-a.ll
    llvm/trunk/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-b.ll

Added: llvm/trunk/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-a.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-a.ll?rev=372170&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-a.ll (added)
+++ llvm/trunk/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-a.ll Tue Sep 17 12:32:11 2019
@@ -0,0 +1,130 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+; If we have some pattern that leaves only some low bits set, and then performs
+; a left-shift of those bits, we can combine the mask and the shift into a shift+mask.
+
+; There are many variants of this pattern:
+;   a)  (x & ((1 << maskNbits) - 1)) << shiftNbits
+; which simplifies to:
+;   (x << shiftNbits) & (~(-1 << (maskNbits+shiftNbits)))
+
+; Simple tests.
+
+declare void @use32(i32)
+
+define i32 @t0_basic(i32 %x, i32 %nbits) {
+; CHECK-LABEL: @t0_basic(
+; CHECK-NEXT:    [[T0:%.*]] = add i32 [[NBITS:%.*]], -1
+; CHECK-NEXT:    [[T1:%.*]] = shl i32 1, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add i32 [[T1]], -1
+; CHECK-NEXT:    [[T3:%.*]] = and i32 [[T2]], [[X:%.*]]
+; CHECK-NEXT:    [[T4:%.*]] = sub i32 32, [[NBITS]]
+; CHECK-NEXT:    call void @use32(i32 [[T0]])
+; CHECK-NEXT:    call void @use32(i32 [[T1]])
+; CHECK-NEXT:    call void @use32(i32 [[T2]])
+; CHECK-NEXT:    call void @use32(i32 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T3]], [[T4]]
+; CHECK-NEXT:    ret i32 [[T5]]
+;
+  %t0 = add i32 %nbits, -1
+  %t1 = shl i32 1, %t0 ; shifting by nbits-1
+  %t2 = add i32 %t1, -1
+  %t3 = and i32 %t2, %x
+  %t4 = sub i32 32, %nbits
+  call void @use32(i32 %t0)
+  call void @use32(i32 %t1)
+  call void @use32(i32 %t2)
+  call void @use32(i32 %t4)
+  %t5 = shl i32 %t3, %t4
+  ret i32 %t5
+}
+
+; Vectors
+
+declare void @use8xi32(<8 x i32>)
+
+define <8 x i32> @t1_vec_splat(<8 x i32> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t1_vec_splat(
+; CHECK-NEXT:    [[T0:%.*]] = add <8 x i32> [[NBITS:%.*]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT:    [[T3:%.*]] = and <8 x i32> [[T2]], [[X:%.*]]
+; CHECK-NEXT:    [[T4:%.*]] = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, [[NBITS]]
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T0]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T1]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T3]], [[T4]]
+; CHECK-NEXT:    ret <8 x i32> [[T5]]
+;
+  %t0 = add <8 x i32> %nbits, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+  %t1 = shl <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>, %t0
+  %t2 = add <8 x i32> %t1, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+  %t3 = and <8 x i32> %t2, %x
+  %t4 = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, %nbits
+  call void @use8xi32(<8 x i32> %t0)
+  call void @use8xi32(<8 x i32> %t1)
+  call void @use8xi32(<8 x i32> %t2)
+  call void @use8xi32(<8 x i32> %t4)
+  %t5 = shl <8 x i32> %t3, %t4
+  ret <8 x i32> %t5
+}
+
+define <8 x i32> @t2_vec_nonsplat(<8 x i32> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t2_vec_nonsplat(
+; CHECK-NEXT:    [[T0:%.*]] = add <8 x i32> [[NBITS:%.*]], <i32 -33, i32 -32, i32 -31, i32 -1, i32 0, i32 1, i32 31, i32 32>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT:    [[T3:%.*]] = and <8 x i32> [[T2]], [[X:%.*]]
+; CHECK-NEXT:    [[T4:%.*]] = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, [[NBITS]]
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T0]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T1]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T3]], [[T4]]
+; CHECK-NEXT:    ret <8 x i32> [[T5]]
+;
+  %t0 = add <8 x i32> %nbits, <i32 -33, i32 -32, i32 -31, i32 -1, i32 0, i32 1, i32 31, i32 32>
+  %t1 = shl <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>, %t0
+  %t2 = add <8 x i32> %t1, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+  %t3 = and <8 x i32> %t2, %x
+  %t4 = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, %nbits
+  call void @use8xi32(<8 x i32> %t0)
+  call void @use8xi32(<8 x i32> %t1)
+  call void @use8xi32(<8 x i32> %t2)
+  call void @use8xi32(<8 x i32> %t4)
+  %t5 = shl <8 x i32> %t3, %t4
+  ret <8 x i32> %t5
+}
+
+; Extra uses.
+
+define i32 @n3_extrause(i32 %x, i32 %nbits) {
+; CHECK-LABEL: @n3_extrause(
+; CHECK-NEXT:    [[T0:%.*]] = add i32 [[NBITS:%.*]], -1
+; CHECK-NEXT:    [[T1:%.*]] = shl i32 1, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add i32 [[T1]], -1
+; CHECK-NEXT:    [[T3:%.*]] = and i32 [[T2]], [[X:%.*]]
+; CHECK-NEXT:    [[T4:%.*]] = sub i32 32, [[NBITS]]
+; CHECK-NEXT:    call void @use32(i32 [[T0]])
+; CHECK-NEXT:    call void @use32(i32 [[T1]])
+; CHECK-NEXT:    call void @use32(i32 [[T2]])
+; CHECK-NEXT:    call void @use32(i32 [[T3]])
+; CHECK-NEXT:    call void @use32(i32 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T3]], [[T4]]
+; CHECK-NEXT:    ret i32 [[T5]]
+;
+  %t0 = add i32 %nbits, -1
+  %t1 = shl i32 1, %t0 ; shifting by nbits-1
+  %t2 = add i32 %t1, -1
+  %t3 = and i32 %t2, %x ; this mask must be one-use.
+  %t4 = sub i32 32, %nbits
+  call void @use32(i32 %t0)
+  call void @use32(i32 %t1)
+  call void @use32(i32 %t2)
+  call void @use32(i32 %t3) ; BAD
+  call void @use32(i32 %t4)
+  %t5 = shl i32 %t3, %t4
+  ret i32 %t5
+}
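
To make the expectation concrete for @t0_basic above: maskNbits is %nbits - 1
and shiftNbits is 32 - %nbits, so the sum of shift amounts is always 31, and
the generalized fold would leave a single shift plus the constant mask
~(-1 << 31). The following C++ restatement (function names invented for this
sketch) shows the hoped-for end state, not what -instcombine produces as of
this commit; the CHECK lines above show the pattern is currently left
untouched:

  #include <cassert>
  #include <cstdint>

  // @t0_basic written out in C++; nbits must stay in [1, 32] so that every
  // shift amount is below the bitwidth, mirroring the IR's poison rules.
  uint32_t t0_basic_original(uint32_t x, uint32_t nbits) {
    uint32_t mask = (1u << (nbits - 1)) - 1; // keep the low nbits-1 bits
    return (x & mask) << (32 - nbits);
  }

  // The sum of shift amounts is (nbits - 1) + (32 - nbits) == 31 < 32, so
  // the variable masking should collapse to the constant ~(-1 << 31).
  uint32_t t0_basic_folded(uint32_t x, uint32_t nbits) {
    return (x << (32 - nbits)) & 0x7fffffffu;
  }

  int main() {
    const uint32_t samples[] = {0u, 1u, 0xdeadbeefu, 0xffffffffu};
    for (uint32_t nbits = 1; nbits <= 32; ++nbits)
      for (uint32_t x : samples)
        assert(t0_basic_original(x, nbits) == t0_basic_folded(x, nbits));
  }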

Added: llvm/trunk/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-b.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-b.ll?rev=372170&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-b.ll (added)
+++ llvm/trunk/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-b.ll Tue Sep 17 12:32:11 2019
@@ -0,0 +1,130 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+; If we have some pattern that leaves only some low bits set, and then performs
+; a left-shift of those bits, we can combine the mask and the shift into a shift+mask.
+
+; There are many variants of this pattern:
+;   b)  (x & (~(-1 << maskNbits))) << shiftNbits
+; which simplifies to:
+;   (x << shiftNbits) & (~(-1 << (maskNbits+shiftNbits)))
+
+; Simple tests.
+
+declare void @use32(i32)
+
+define i32 @t0_basic(i32 %x, i32 %nbits) {
+; CHECK-LABEL: @t0_basic(
+; CHECK-NEXT:    [[T0:%.*]] = add i32 [[NBITS:%.*]], -1
+; CHECK-NEXT:    [[T1:%.*]] = shl i32 -1, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = xor i32 [[T1]], -1
+; CHECK-NEXT:    [[T3:%.*]] = and i32 [[T2]], [[X:%.*]]
+; CHECK-NEXT:    [[T4:%.*]] = sub i32 32, [[NBITS]]
+; CHECK-NEXT:    call void @use32(i32 [[T0]])
+; CHECK-NEXT:    call void @use32(i32 [[T1]])
+; CHECK-NEXT:    call void @use32(i32 [[T2]])
+; CHECK-NEXT:    call void @use32(i32 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T3]], [[T4]]
+; CHECK-NEXT:    ret i32 [[T5]]
+;
+  %t0 = add i32 %nbits, -1
+  %t1 = shl i32 -1, %t0 ; shifting by nbits-1
+  %t2 = xor i32 %t1, -1
+  %t3 = and i32 %t2, %x
+  %t4 = sub i32 32, %nbits
+  call void @use32(i32 %t0)
+  call void @use32(i32 %t1)
+  call void @use32(i32 %t2)
+  call void @use32(i32 %t4)
+  %t5 = shl i32 %t3, %t4
+  ret i32 %t5
+}
+
+; Vectors
+
+declare void @use8xi32(<8 x i32>)
+
+define <8 x i32> @t1_vec_splat(<8 x i32> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t1_vec_splat(
+; CHECK-NEXT:    [[T0:%.*]] = add <8 x i32> [[NBITS:%.*]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = xor <8 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT:    [[T3:%.*]] = and <8 x i32> [[T2]], [[X:%.*]]
+; CHECK-NEXT:    [[T4:%.*]] = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, [[NBITS]]
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T0]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T1]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T3]], [[T4]]
+; CHECK-NEXT:    ret <8 x i32> [[T5]]
+;
+  %t0 = add <8 x i32> %nbits, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+  %t1 = shl <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, %t0
+  %t2 = xor <8 x i32> %t1, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+  %t3 = and <8 x i32> %t2, %x
+  %t4 = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, %nbits
+  call void @use8xi32(<8 x i32> %t0)
+  call void @use8xi32(<8 x i32> %t1)
+  call void @use8xi32(<8 x i32> %t2)
+  call void @use8xi32(<8 x i32> %t4)
+  %t5 = shl <8 x i32> %t3, %t4
+  ret <8 x i32> %t5
+}
+
+define <8 x i32> @t2_vec_nonsplat(<8 x i32> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t2_vec_nonsplat(
+; CHECK-NEXT:    [[T0:%.*]] = add <8 x i32> [[NBITS:%.*]], <i32 -33, i32 -32, i32 -31, i32 -1, i32 0, i32 1, i32 31, i32 32>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = xor <8 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT:    [[T3:%.*]] = and <8 x i32> [[T2]], [[X:%.*]]
+; CHECK-NEXT:    [[T4:%.*]] = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, [[NBITS]]
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T0]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T1]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T3]], [[T4]]
+; CHECK-NEXT:    ret <8 x i32> [[T5]]
+;
+  %t0 = add <8 x i32> %nbits, <i32 -33, i32 -32, i32 -31, i32 -1, i32 0, i32 1, i32 31, i32 32>
+  %t1 = shl <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, %t0
+  %t2 = xor <8 x i32> %t1, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+  %t3 = and <8 x i32> %t2, %x
+  %t4 = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, %nbits
+  call void @use8xi32(<8 x i32> %t0)
+  call void @use8xi32(<8 x i32> %t1)
+  call void @use8xi32(<8 x i32> %t2)
+  call void @use8xi32(<8 x i32> %t4)
+  %t5 = shl <8 x i32> %t3, %t4
+  ret <8 x i32> %t5
+}
+
+; Extra uses.
+
+define i32 @n3_extrause(i32 %x, i32 %nbits) {
+; CHECK-LABEL: @n3_extrause(
+; CHECK-NEXT:    [[T0:%.*]] = add i32 [[NBITS:%.*]], -1
+; CHECK-NEXT:    [[T1:%.*]] = shl i32 -1, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = xor i32 [[T1]], -1
+; CHECK-NEXT:    [[T3:%.*]] = and i32 [[T2]], [[X:%.*]]
+; CHECK-NEXT:    [[T4:%.*]] = sub i32 32, [[NBITS]]
+; CHECK-NEXT:    call void @use32(i32 [[T0]])
+; CHECK-NEXT:    call void @use32(i32 [[T1]])
+; CHECK-NEXT:    call void @use32(i32 [[T2]])
+; CHECK-NEXT:    call void @use32(i32 [[T3]])
+; CHECK-NEXT:    call void @use32(i32 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T3]], [[T4]]
+; CHECK-NEXT:    ret i32 [[T5]]
+;
+  %t0 = add i32 %nbits, -1
+  %t1 = shl i32 -1, %t0 ; shifting by nbits-1
+  %t2 = xor i32 %t1, -1
+  %t3 = and i32 %t2, %x ; this mask must be one-use.
+  %t4 = sub i32 32, %nbits
+  call void @use32(i32 %t0)
+  call void @use32(i32 %t1)
+  call void @use32(i32 %t2)
+  call void @use32(i32 %t3) ; BAD
+  call void @use32(i32 %t4)
+  %t5 = shl i32 %t3, %t4
+  ret i32 %t5
+}

Modified: llvm/trunk/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-a.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-a.ll?rev=372170&r1=372169&r2=372170&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-a.ll (original)
+++ llvm/trunk/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-a.ll Tue Sep 17 12:32:11 2019
@@ -407,28 +407,3 @@ define i32 @n13_not_minus_one(i32 %x, i3
   %t4 = shl i32 %t2, %t3
   ret i32 %t4
 }
-
-define i32 @n14_insifficient_sum(i32 %x, i32 %nbits) {
-; CHECK-LABEL: @n14_insifficient_sum(
-; CHECK-NEXT:    [[T0:%.*]] = shl i32 1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = add nsw i32 [[T0]], -1
-; CHECK-NEXT:    [[T2:%.*]] = and i32 [[T1]], [[X:%.*]]
-; CHECK-NEXT:    [[T3:%.*]] = sub i32 31, [[NBITS]]
-; CHECK-NEXT:    call void @use32(i32 [[T0]])
-; CHECK-NEXT:    call void @use32(i32 [[T1]])
-; CHECK-NEXT:    call void @use32(i32 [[T2]])
-; CHECK-NEXT:    call void @use32(i32 [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = shl i32 [[T2]], [[T3]]
-; CHECK-NEXT:    ret i32 [[T4]]
-;
-  %t0 = shl i32 1, %nbits
-  %t1 = add nsw i32 %t0, -1
-  %t2 = and i32 %t1, %x
-  %t3 = sub i32 31, %nbits ; summary shift amount is less than 32
-  call void @use32(i32 %t0)
-  call void @use32(i32 %t1)
-  call void @use32(i32 %t2)
-  call void @use32(i32 %t3)
-  %t4 = shl i32 %t2, %t3
-  ret i32 %t4
-}

Modified: llvm/trunk/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-b.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-b.ll?rev=372170&r1=372169&r2=372170&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-b.ll (original)
+++ llvm/trunk/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-b.ll Tue Sep 17 12:32:11 2019
@@ -382,28 +382,3 @@ define i32 @n12_not_minus_one(i32 %x, i3
   %t4 = shl i32 %t2, %t3
   ret i32 %t4
 }
-
-define i32 @n13_insufficient_sum(i32 %x, i32 %nbits) {
-; CHECK-LABEL: @n13_insufficient_sum(
-; CHECK-NEXT:    [[T0:%.*]] = shl i32 -1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = xor i32 [[T0]], -1
-; CHECK-NEXT:    [[T2:%.*]] = and i32 [[T1]], [[X:%.*]]
-; CHECK-NEXT:    [[T3:%.*]] = sub i32 31, [[NBITS]]
-; CHECK-NEXT:    call void @use32(i32 [[T0]])
-; CHECK-NEXT:    call void @use32(i32 [[T1]])
-; CHECK-NEXT:    call void @use32(i32 [[T2]])
-; CHECK-NEXT:    call void @use32(i32 [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = shl i32 [[T2]], [[T3]]
-; CHECK-NEXT:    ret i32 [[T4]]
-;
-  %t0 = shl i32 -1, %nbits
-  %t1 = xor i32 %t0, -1
-  %t2 = and i32 %t1, %x
-  %t3 = sub i32 31, %nbits ; summary shift amount is less than 32
-  call void @use32(i32 %t0)
-  call void @use32(i32 %t1)
-  call void @use32(i32 %t2)
-  call void @use32(i32 %t3)
-  %t4 = shl i32 %t2, %t3
-  ret i32 %t4
-}



