[llvm] 33d96bf - [InstCombine] Add vector tests for the or(shl(zext(x),32)|zext(y)) concat combines
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Wed May 13 10:48:14 PDT 2020
Author: Simon Pilgrim
Date: 2020-05-13T18:48:02+01:00
New Revision: 33d96bf7b9b2add1c45bf1c60195dc7ca961393e
URL: https://github.com/llvm/llvm-project/commit/33d96bf7b9b2add1c45bf1c60195dc7ca961393e
DIFF: https://github.com/llvm/llvm-project/commit/33d96bf7b9b2add1c45bf1c60195dc7ca961393e.diff
LOG: [InstCombine] Add vector tests for the or(shl(zext(x),32)|zext(y)) concat combines
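For context, here is a minimal scalar sketch of the or(shl(zext(x),32)|zext(y)) concat shape that the new vector tests mirror (the function name below is illustrative and not part of this patch). The unary split form is expected to fold to a single wide bswap/bitreverse call, as the vector CHECK lines in the diff show for the <2 x i64> cases:

; Illustrative scalar form of the concat pattern (not part of this commit):
; bswap each 32-bit half, zext, then recombine with shl nuw + or. InstCombine
; is expected to fold this whole sequence into one i64 bswap.
define i64 @concat_bswap32_unary_split_sketch(i64 %a0) {
  %hi = lshr i64 %a0, 32
  %hi32 = trunc i64 %hi to i32
  %lo32 = trunc i64 %a0 to i32
  %bshi = tail call i32 @llvm.bswap.i32(i32 %hi32)
  %bslo = tail call i32 @llvm.bswap.i32(i32 %lo32)
  %zhi = zext i32 %bshi to i64
  %zlo = zext i32 %bslo to i64
  %shl = shl nuw i64 %zlo, 32
  %res = or i64 %zhi, %shl
  ret i64 %res
}

declare i32 @llvm.bswap.i32(i32)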
Added:
Modified:
llvm/test/Transforms/InstCombine/or-concat.ll
Removed:
################################################################################
diff --git a/llvm/test/Transforms/InstCombine/or-concat.ll b/llvm/test/Transforms/InstCombine/or-concat.ll
index 4148e4900f7e..4bbe5896a7f1 100644
--- a/llvm/test/Transforms/InstCombine/or-concat.ll
+++ b/llvm/test/Transforms/InstCombine/or-concat.ll
@@ -28,6 +28,23 @@ define i64 @concat_bswap32_unary_split(i64 %a0) {
ret i64 %9
}
+define <2 x i64> @concat_bswap32_unary_split_vector(<2 x i64> %a0) {
+; CHECK-LABEL: @concat_bswap32_unary_split_vector(
+; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> [[A0:%.*]])
+; CHECK-NEXT: ret <2 x i64> [[TMP1]]
+;
+ %1 = lshr <2 x i64> %a0, <i64 32, i64 32>
+ %2 = trunc <2 x i64> %1 to <2 x i32>
+ %3 = trunc <2 x i64> %a0 to <2 x i32>
+ %4 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %2)
+ %5 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %3)
+ %6 = zext <2 x i32> %4 to <2 x i64>
+ %7 = zext <2 x i32> %5 to <2 x i64>
+ %8 = shl nuw <2 x i64> %7, <i64 32, i64 32>
+ %9 = or <2 x i64> %6, %8
+ ret <2 x i64> %9
+}
+
define i64 @concat_bswap32_unary_flip(i64 %a0) {
; CHECK-LABEL: @concat_bswap32_unary_flip(
; CHECK-NEXT: [[TMP1:%.*]] = lshr i64 [[A0:%.*]], 32
@@ -48,6 +65,26 @@ define i64 @concat_bswap32_unary_flip(i64 %a0) {
ret i64 %9
}
+define <2 x i64> @concat_bswap32_unary_flip_vector(<2 x i64> %a0) {
+; CHECK-LABEL: @concat_bswap32_unary_flip_vector(
+; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i64> [[A0:%.*]], <i64 32, i64 32>
+; CHECK-NEXT: [[TMP2:%.*]] = shl <2 x i64> [[A0]], <i64 32, i64 32>
+; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
+; CHECK-NEXT: [[TMP4:%.*]] = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> [[TMP3]])
+; CHECK-NEXT: ret <2 x i64> [[TMP4]]
+;
+ %1 = lshr <2 x i64> %a0, <i64 32, i64 32>
+ %2 = trunc <2 x i64> %1 to <2 x i32>
+ %3 = trunc <2 x i64> %a0 to <2 x i32>
+ %4 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %2)
+ %5 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %3)
+ %6 = zext <2 x i32> %4 to <2 x i64>
+ %7 = zext <2 x i32> %5 to <2 x i64>
+ %8 = shl nuw <2 x i64> %6, <i64 32, i64 32>
+ %9 = or <2 x i64> %7, %8
+ ret <2 x i64> %9
+}
+
define i64 @concat_bswap32_binary(i32 %a0, i32 %a1) {
; CHECK-LABEL: @concat_bswap32_binary(
; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[A1:%.*]] to i64
@@ -66,7 +103,26 @@ define i64 @concat_bswap32_binary(i32 %a0, i32 %a1) {
ret i64 %6
}
+define <2 x i64> @concat_bswap32_binary_vector(<2 x i32> %a0, <2 x i32> %a1) {
+; CHECK-LABEL: @concat_bswap32_binary_vector(
+; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i32> [[A1:%.*]] to <2 x i64>
+; CHECK-NEXT: [[TMP2:%.*]] = zext <2 x i32> [[A0:%.*]] to <2 x i64>
+; CHECK-NEXT: [[TMP3:%.*]] = shl nuw <2 x i64> [[TMP2]], <i64 32, i64 32>
+; CHECK-NEXT: [[TMP4:%.*]] = or <2 x i64> [[TMP3]], [[TMP1]]
+; CHECK-NEXT: [[TMP5:%.*]] = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> [[TMP4]])
+; CHECK-NEXT: ret <2 x i64> [[TMP5]]
+;
+ %1 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %a0)
+ %2 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %a1)
+ %3 = zext <2 x i32> %1 to <2 x i64>
+ %4 = zext <2 x i32> %2 to <2 x i64>
+ %5 = shl nuw <2 x i64> %4, <i64 32, i64 32>
+ %6 = or <2 x i64> %3, %5
+ ret <2 x i64> %6
+}
+
declare i32 @llvm.bswap.i32(i32)
+declare <2 x i32> @llvm.bswap.v2i32(<2 x i32>)
; BITREVERSE
@@ -87,6 +143,23 @@ define i64 @concat_bitreverse32_unary_split(i64 %a0) {
ret i64 %9
}
+define <2 x i64> @concat_bitreverse32_unary_split_vector(<2 x i64> %a0) {
+; CHECK-LABEL: @concat_bitreverse32_unary_split_vector(
+; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> [[A0:%.*]])
+; CHECK-NEXT: ret <2 x i64> [[TMP1]]
+;
+ %1 = lshr <2 x i64> %a0, <i64 32, i64 32>
+ %2 = trunc <2 x i64> %1 to <2 x i32>
+ %3 = trunc <2 x i64> %a0 to <2 x i32>
+ %4 = tail call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> %2)
+ %5 = tail call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> %3)
+ %6 = zext <2 x i32> %4 to <2 x i64>
+ %7 = zext <2 x i32> %5 to <2 x i64>
+ %8 = shl nuw <2 x i64> %7, <i64 32, i64 32>
+ %9 = or <2 x i64> %6, %8
+ ret <2 x i64> %9
+}
+
define i64 @concat_bitreverse32_unary_flip(i64 %a0) {
; CHECK-LABEL: @concat_bitreverse32_unary_flip(
; CHECK-NEXT: [[TMP1:%.*]] = lshr i64 [[A0:%.*]], 32
@@ -107,6 +180,26 @@ define i64 @concat_bitreverse32_unary_flip(i64 %a0) {
ret i64 %9
}
+define <2 x i64> @concat_bitreverse32_unary_flip_vector(<2 x i64> %a0) {
+; CHECK-LABEL: @concat_bitreverse32_unary_flip_vector(
+; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i64> [[A0:%.*]], <i64 32, i64 32>
+; CHECK-NEXT: [[TMP2:%.*]] = shl <2 x i64> [[A0]], <i64 32, i64 32>
+; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
+; CHECK-NEXT: [[TMP4:%.*]] = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> [[TMP3]])
+; CHECK-NEXT: ret <2 x i64> [[TMP4]]
+;
+ %1 = lshr <2 x i64> %a0, <i64 32, i64 32>
+ %2 = trunc <2 x i64> %1 to <2 x i32>
+ %3 = trunc <2 x i64> %a0 to <2 x i32>
+ %4 = tail call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> %2)
+ %5 = tail call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> %3)
+ %6 = zext <2 x i32> %4 to <2 x i64>
+ %7 = zext <2 x i32> %5 to <2 x i64>
+ %8 = shl nuw <2 x i64> %6, <i64 32, i64 32>
+ %9 = or <2 x i64> %7, %8
+ ret <2 x i64> %9
+}
+
define i64 @concat_bitreverse32_binary(i32 %a0, i32 %a1) {
; CHECK-LABEL: @concat_bitreverse32_binary(
; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[A1:%.*]] to i64
@@ -125,4 +218,23 @@ define i64 @concat_bitreverse32_binary(i32 %a0, i32 %a1) {
ret i64 %6
}
+define <2 x i64> @concat_bitreverse32_binary_vector(<2 x i32> %a0, <2 x i32> %a1) {
+; CHECK-LABEL: @concat_bitreverse32_binary_vector(
+; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i32> [[A1:%.*]] to <2 x i64>
+; CHECK-NEXT: [[TMP2:%.*]] = zext <2 x i32> [[A0:%.*]] to <2 x i64>
+; CHECK-NEXT: [[TMP3:%.*]] = shl nuw <2 x i64> [[TMP2]], <i64 32, i64 32>
+; CHECK-NEXT: [[TMP4:%.*]] = or <2 x i64> [[TMP3]], [[TMP1]]
+; CHECK-NEXT: [[TMP5:%.*]] = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> [[TMP4]])
+; CHECK-NEXT: ret <2 x i64> [[TMP5]]
+;
+ %1 = tail call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> %a0)
+ %2 = tail call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> %a1)
+ %3 = zext <2 x i32> %1 to <2 x i64>
+ %4 = zext <2 x i32> %2 to <2 x i64>
+ %5 = shl nuw <2 x i64> %4, <i64 32, i64 32>
+ %6 = or <2 x i64> %3, %5
+ ret <2 x i64> %6
+}
+
declare i32 @llvm.bitreverse.i32(i32)
+declare <2 x i32> @llvm.bitreverse.v2i32(<2 x i32>)