[llvm] 7699647 - [InstCombine] Add trunc+zext 'narrow' funnel shift tests (PR35155)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Oct 16 04:07:00 PDT 2020


Author: Simon Pilgrim
Date: 2020-10-16T12:06:47+01:00
New Revision: 76996470ef3e16b20498bc1ee083ef5a50e5ff8f

URL: https://github.com/llvm/llvm-project/commit/76996470ef3e16b20498bc1ee083ef5a50e5ff8f
DIFF: https://github.com/llvm/llvm-project/commit/76996470ef3e16b20498bc1ee083ef5a50e5ff8f.diff

LOG: [InstCombine] Add trunc+zext 'narrow' funnel shift tests (PR35155)

Based on the rotation equivalents in rotate.ll
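For context, the eventual goal tracked by PR35155 is to shrink these widen-shift-or-truncate sequences back to a funnel shift at the original bitwidth. A rough sketch of the kind of result the first test is aiming for; this is illustrative only (the function name is made up), since this patch only adds the baseline tests and does not implement the fold:

define i16 @fshl_16bit_target(i16 %x, i16 %y, i32 %shift) {
  ; the pattern masks the shift amount to its low 4 bits, and llvm.fshl
  ; takes the shift modulo the bitwidth, so a plain trunc is enough here
  %sh = trunc i32 %shift to i16
  %r = call i16 @llvm.fshl.i16(i16 %x, i16 %y, i16 %sh)
  ret i16 %r
}
declare i16 @llvm.fshl.i16(i16, i16, i16)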

Added: 
    

Modified: 
    llvm/test/Transforms/InstCombine/funnel.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/InstCombine/funnel.ll b/llvm/test/Transforms/InstCombine/funnel.ll
index 5c9427973430..542ec5f84af7 100644
--- a/llvm/test/Transforms/InstCombine/funnel.ll
+++ b/llvm/test/Transforms/InstCombine/funnel.ll
@@ -200,3 +200,104 @@ define <2 x i64> @fshr_sub_mask_vector(<2 x i64> %x, <2 x i64> %y, <2 x i64> %a)
   %r = or <2 x i64> %shl, %shr
   ret <2 x i64> %r
 }
+
+; PR35155 - these are optionally UB-free funnel shift left/right patterns that are narrowed to a smaller bitwidth.
+
+define i16 @fshl_16bit(i16 %x, i16 %y, i32 %shift) {
+; CHECK-LABEL: @fshl_16bit(
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHIFT:%.*]], 15
+; CHECK-NEXT:    [[CONVX:%.*]] = zext i16 [[X:%.*]] to i32
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[CONVX]], [[AND]]
+; CHECK-NEXT:    [[SUB:%.*]] = sub nuw nsw i32 16, [[AND]]
+; CHECK-NEXT:    [[CONVY:%.*]] = zext i16 [[Y:%.*]] to i32
+; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 [[CONVY]], [[SUB]]
+; CHECK-NEXT:    [[OR:%.*]] = or i32 [[SHR]], [[SHL]]
+; CHECK-NEXT:    [[CONV2:%.*]] = trunc i32 [[OR]] to i16
+; CHECK-NEXT:    ret i16 [[CONV2]]
+;
+  %and = and i32 %shift, 15
+  %convx = zext i16 %x to i32
+  %shl = shl i32 %convx, %and
+  %sub = sub i32 16, %and
+  %convy = zext i16 %y to i32
+  %shr = lshr i32 %convy, %sub
+  %or = or i32 %shr, %shl
+  %conv2 = trunc i32 %or to i16
+  ret i16 %conv2
+}
+
+; Commute the 'or' operands and try a vector type.
+
+define <2 x i16> @fshl_commute_16bit_vec(<2 x i16> %x, <2 x i16> %y, <2 x i32> %shift) {
+; CHECK-LABEL: @fshl_commute_16bit_vec(
+; CHECK-NEXT:    [[AND:%.*]] = and <2 x i32> [[SHIFT:%.*]], <i32 15, i32 15>
+; CHECK-NEXT:    [[CONVX:%.*]] = zext <2 x i16> [[X:%.*]] to <2 x i32>
+; CHECK-NEXT:    [[SHL:%.*]] = shl <2 x i32> [[CONVX]], [[AND]]
+; CHECK-NEXT:    [[SUB:%.*]] = sub nuw nsw <2 x i32> <i32 16, i32 16>, [[AND]]
+; CHECK-NEXT:    [[CONVY:%.*]] = zext <2 x i16> [[Y:%.*]] to <2 x i32>
+; CHECK-NEXT:    [[SHR:%.*]] = lshr <2 x i32> [[CONVY]], [[SUB]]
+; CHECK-NEXT:    [[OR:%.*]] = or <2 x i32> [[SHL]], [[SHR]]
+; CHECK-NEXT:    [[CONV2:%.*]] = trunc <2 x i32> [[OR]] to <2 x i16>
+; CHECK-NEXT:    ret <2 x i16> [[CONV2]]
+;
+  %and = and <2 x i32> %shift, <i32 15, i32 15>
+  %convx = zext <2 x i16> %x to <2 x i32>
+  %shl = shl <2 x i32> %convx, %and
+  %sub = sub <2 x i32> <i32 16, i32 16>, %and
+  %convy = zext <2 x i16> %y to <2 x i32>
+  %shr = lshr <2 x i32> %convy, %sub
+  %or = or <2 x i32> %shl, %shr
+  %conv2 = trunc <2 x i32> %or to <2 x i16>
+  ret <2 x i16> %conv2
+}
+
+; Change the size, shift direction (the subtract is on the left-shift), and mask op.
+
+define i8 @fshr_8bit(i8 %x, i8 %y, i3 %shift) {
+; CHECK-LABEL: @fshr_8bit(
+; CHECK-NEXT:    [[AND:%.*]] = zext i3 [[SHIFT:%.*]] to i32
+; CHECK-NEXT:    [[CONVX:%.*]] = zext i8 [[X:%.*]] to i32
+; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 [[CONVX]], [[AND]]
+; CHECK-NEXT:    [[SUB:%.*]] = sub nuw nsw i32 8, [[AND]]
+; CHECK-NEXT:    [[CONVY:%.*]] = zext i8 [[Y:%.*]] to i32
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[CONVY]], [[SUB]]
+; CHECK-NEXT:    [[OR:%.*]] = or i32 [[SHL]], [[SHR]]
+; CHECK-NEXT:    [[CONV2:%.*]] = trunc i32 [[OR]] to i8
+; CHECK-NEXT:    ret i8 [[CONV2]]
+;
+  %and = zext i3 %shift to i32
+  %convx = zext i8 %x to i32
+  %shr = lshr i32 %convx, %and
+  %sub = sub i32 8, %and
+  %convy = zext i8 %y to i32
+  %shl = shl i32 %convy, %sub
+  %or = or i32 %shl, %shr
+  %conv2 = trunc i32 %or to i8
+  ret i8 %conv2
+}
+
+; The shifted value does not need to be a zexted value; here it is masked.
+; The shift mask could be less than the bitwidth, but this is still ok.
+
+define i8 @fshr_commute_8bit(i32 %x, i32 %y, i32 %shift) {
+; CHECK-LABEL: @fshr_commute_8bit(
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHIFT:%.*]], 3
+; CHECK-NEXT:    [[CONVX:%.*]] = and i32 [[X:%.*]], 255
+; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 [[CONVX]], [[AND]]
+; CHECK-NEXT:    [[SUB:%.*]] = sub nuw nsw i32 8, [[AND]]
+; CHECK-NEXT:    [[CONVY:%.*]] = and i32 [[Y:%.*]], 255
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[CONVY]], [[SUB]]
+; CHECK-NEXT:    [[OR:%.*]] = or i32 [[SHR]], [[SHL]]
+; CHECK-NEXT:    [[CONV2:%.*]] = trunc i32 [[OR]] to i8
+; CHECK-NEXT:    ret i8 [[CONV2]]
+;
+  %and = and i32 %shift, 3
+  %convx = and i32 %x, 255
+  %shr = lshr i32 %convx, %and
+  %sub = sub i32 8, %and
+  %convy = and i32 %y, 255
+  %shl = shl i32 %convy, %sub
+  %or = or i32 %shr, %shl
+  %conv2 = trunc i32 %or to i8
+  ret i8 %conv2
+}
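A note on the 'optionally UB-free' wording in the test comment above: at the narrow width, a shift amount of 16 (which occurs whenever the masked shift is 0) would be poison, but after widening to i32 the same amount is in range and the lshr simply yields 0, so the 'or' still produces the plain left-shifted value, which is the correct funnel-shift result for a shift of 0. A minimal illustration of that case (value names are hypothetical):

  ; with %shift == 0 the narrow form would need 'lshr i16 %y, 16' (poison),
  ; but the widened form is well defined:
  %convy = zext i16 %y to i32   ; upper 16 bits are zero
  %shr = lshr i32 %convy, 16    ; shift amount is in range for i32; result is 0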

More information about the llvm-commits mailing list