[llvm] f9f4767 - [InstCombine] Precommit tests
Kazu Hirata via llvm-commits
llvm-commits at lists.llvm.org
Wed Mar 22 23:28:12 PDT 2023
Author: Kazu Hirata
Date: 2023-03-22T23:27:59-07:00
New Revision: f9f4767af9f3d89792d67ae8c5f65913ff263b89
URL: https://github.com/llvm/llvm-project/commit/f9f4767af9f3d89792d67ae8c5f65913ff263b89
DIFF: https://github.com/llvm/llvm-project/commit/f9f4767af9f3d89792d67ae8c5f65913ff263b89.diff
LOG: [InstCombine] Precommit tests
This patch precommits tests for:
https://github.com/llvm/llvm-project/issues/61183
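
For context, the tests below exercise the usual source-level spelling of
bit_floor: the largest power of two not exceeding x, or 0 when x is 0. A
minimal C++20 sketch of that idiom follows; it is not part of this commit,
and the helper name bit_floor32 is made up for illustration:

#include <bit>
#include <cstdint>

// Largest power of two <= x; 0 when x == 0.
uint32_t bit_floor32(uint32_t x) {
  // Same shape as the IR in the tests:
  //   select(x != 0, 1 << (32 - ctlz(x >> 1)), 0)
  // std::countl_zero is defined for a zero input (returning 32 for
  // uint32_t), mirroring ctlz with is_zero_poison set to false.
  return x == 0 ? 0 : uint32_t{1} << (32 - std::countl_zero(x >> 1));
}

Each negative test below stores one intermediate value to give it a second
use, since a single-use requirement is what the eventual fold is expected
to check.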
Added:
Modified:
llvm/test/Transforms/InstCombine/bit_floor.ll
Removed:
################################################################################
diff --git a/llvm/test/Transforms/InstCombine/bit_floor.ll b/llvm/test/Transforms/InstCombine/bit_floor.ll
index d436e53eb450..9daa8eee8969 100644
--- a/llvm/test/Transforms/InstCombine/bit_floor.ll
+++ b/llvm/test/Transforms/InstCombine/bit_floor.ll
@@ -39,6 +39,114 @@ define i64 @bit_floor_64(i64 %x) {
ret i64 %sel
}
+; Commuted select operands should still be recognized.
+define i32 @bit_floor_commuted_operands(i32 %x) {
+; CHECK-LABEL: @bit_floor_commuted_operands(
+; CHECK-NEXT: [[NE0_NOT:%.*]] = icmp eq i32 [[X:%.*]], 0
+; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[X]], 1
+; CHECK-NEXT: [[CTLZ:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[LSHR]], i1 false), !range [[RNG0]]
+; CHECK-NEXT: [[SUB:%.*]] = sub nuw nsw i32 32, [[CTLZ]]
+; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 1, [[SUB]]
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[NE0_NOT]], i32 0, i32 [[SHL]]
+; CHECK-NEXT: ret i32 [[SEL]]
+;
+ %ne0 = icmp ne i32 %x, 0
+ %lshr = lshr i32 %x, 1
+ %ctlz = tail call i32 @llvm.ctlz.i32(i32 %lshr, i1 false)
+ %sub = sub i32 32, %ctlz
+ %shl = shl i32 1, %sub
+ %sel = select i1 %ne0, i32 %shl, i32 0
+ ret i32 %sel
+}
+
+; Negative test: lshr used twice
+define i32 @bit_floor_lshr_used_twice(i32 %x, ptr %p) {
+; CHECK-LABEL: @bit_floor_lshr_used_twice(
+; CHECK-NEXT: [[EQ0:%.*]] = icmp eq i32 [[X:%.*]], 0
+; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[X]], 1
+; CHECK-NEXT: [[CTLZ:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[LSHR]], i1 false), !range [[RNG0]]
+; CHECK-NEXT: [[SUB:%.*]] = sub nuw nsw i32 32, [[CTLZ]]
+; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 1, [[SUB]]
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[EQ0]], i32 0, i32 [[SHL]]
+; CHECK-NEXT: store i32 [[LSHR]], ptr [[P:%.*]], align 4
+; CHECK-NEXT: ret i32 [[SEL]]
+;
+ %eq0 = icmp eq i32 %x, 0
+ %lshr = lshr i32 %x, 1
+ %ctlz = tail call i32 @llvm.ctlz.i32(i32 %lshr, i1 false)
+ %sub = sub i32 32, %ctlz
+ %shl = shl i32 1, %sub
+ %sel = select i1 %eq0, i32 0, i32 %shl
+ store i32 %lshr, ptr %p, align 4
+ ret i32 %sel
+}
+
+; Negative test: ctlz used twice
+define i32 @bit_floor_ctlz_used_twice(i32 %x, ptr %p) {
+; CHECK-LABEL: @bit_floor_ctlz_used_twice(
+; CHECK-NEXT: [[EQ0:%.*]] = icmp eq i32 [[X:%.*]], 0
+; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[X]], 1
+; CHECK-NEXT: [[CTLZ:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[LSHR]], i1 false), !range [[RNG0]]
+; CHECK-NEXT: [[SUB:%.*]] = sub nuw nsw i32 32, [[CTLZ]]
+; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 1, [[SUB]]
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[EQ0]], i32 0, i32 [[SHL]]
+; CHECK-NEXT: store i32 [[CTLZ]], ptr [[P:%.*]], align 4
+; CHECK-NEXT: ret i32 [[SEL]]
+;
+ %eq0 = icmp eq i32 %x, 0
+ %lshr = lshr i32 %x, 1
+ %ctlz = tail call i32 @llvm.ctlz.i32(i32 %lshr, i1 false)
+ %sub = sub i32 32, %ctlz
+ %shl = shl i32 1, %sub
+ %sel = select i1 %eq0, i32 0, i32 %shl
+ store i32 %ctlz, ptr %p, align 4
+ ret i32 %sel
+}
+
+; Negative test: sub used twice
+define i32 @bit_floor_sub_used_twice(i32 %x, ptr %p) {
+; CHECK-LABEL: @bit_floor_sub_used_twice(
+; CHECK-NEXT: [[EQ0:%.*]] = icmp eq i32 [[X:%.*]], 0
+; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[X]], 1
+; CHECK-NEXT: [[CTLZ:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[LSHR]], i1 false), !range [[RNG0]]
+; CHECK-NEXT: [[SUB:%.*]] = sub nuw nsw i32 32, [[CTLZ]]
+; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 1, [[SUB]]
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[EQ0]], i32 0, i32 [[SHL]]
+; CHECK-NEXT: store i32 [[SUB]], ptr [[P:%.*]], align 4
+; CHECK-NEXT: ret i32 [[SEL]]
+;
+ %eq0 = icmp eq i32 %x, 0
+ %lshr = lshr i32 %x, 1
+ %ctlz = tail call i32 @llvm.ctlz.i32(i32 %lshr, i1 false)
+ %sub = sub i32 32, %ctlz
+ %shl = shl i32 1, %sub
+ %sel = select i1 %eq0, i32 0, i32 %shl
+ store i32 %sub, ptr %p, align 4
+ ret i32 %sel
+}
+
+; Negative test: shl used twice
+define i32 @bit_floor_shl_used_twice(i32 %x, ptr %p) {
+; CHECK-LABEL: @bit_floor_shl_used_twice(
+; CHECK-NEXT: [[EQ0:%.*]] = icmp eq i32 [[X:%.*]], 0
+; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[X]], 1
+; CHECK-NEXT: [[CTLZ:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[LSHR]], i1 false), !range [[RNG0]]
+; CHECK-NEXT: [[SUB:%.*]] = sub nuw nsw i32 32, [[CTLZ]]
+; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 1, [[SUB]]
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[EQ0]], i32 0, i32 [[SHL]]
+; CHECK-NEXT: store i32 [[SHL]], ptr [[P:%.*]], align 4
+; CHECK-NEXT: ret i32 [[SEL]]
+;
+ %eq0 = icmp eq i32 %x, 0
+ %lshr = lshr i32 %x, 1
+ %ctlz = tail call i32 @llvm.ctlz.i32(i32 %lshr, i1 false)
+ %sub = sub i32 32, %ctlz
+ %shl = shl i32 1, %sub
+ %sel = select i1 %eq0, i32 0, i32 %shl
+ store i32 %shl, ptr %p, align 4
+ ret i32 %sel
+}
+
; a vector version of @bit_floor_32 above
define <4 x i32> @bit_floor_v4i32(<4 x i32> %x) {
; CHECK-LABEL: @bit_floor_v4i32(
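
To reproduce the checks locally, something along these lines should work
(a sketch using standard LLVM tooling; the authoritative RUN line sits at
the top of bit_floor.ll and is elided from this excerpt):

opt -passes=instcombine -S llvm/test/Transforms/InstCombine/bit_floor.ll | FileCheck llvm/test/Transforms/InstCombine/bit_floor.ll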