[llvm] c5138e6 - [InstCombine] add/adjust tests for add+xor -> shifts; NFC
Sanjay Patel via llvm-commits
llvm-commits at lists.llvm.org
Sun Oct 11 06:09:34 PDT 2020
Author: Sanjay Patel
Date: 2020-10-11T09:04:24-04:00
New Revision: c5138e61e16aeee24216f4006bcf9cb6c12d28fd
URL: https://github.com/llvm/llvm-project/commit/c5138e61e16aeee24216f4006bcf9cb6c12d28fd
DIFF: https://github.com/llvm/llvm-project/commit/c5138e61e16aeee24216f4006bcf9cb6c12d28fd.diff
LOG: [InstCombine] add/adjust tests for add+xor -> shifts; NFC
Added:
Modified:
llvm/test/Transforms/InstCombine/signext.ll
Removed:
################################################################################
diff --git a/llvm/test/Transforms/InstCombine/signext.ll b/llvm/test/Transforms/InstCombine/signext.ll
index df484ca24f8e..f8aa7c9ead10 100644
--- a/llvm/test/Transforms/InstCombine/signext.ll
+++ b/llvm/test/Transforms/InstCombine/signext.ll
@@ -3,62 +3,112 @@
target datalayout = "n8:16:32:64"
-define i32 @test1(i32 %x) {
-; CHECK-LABEL: @test1(
-; CHECK-NEXT: [[SEXT:%.*]] = shl i32 %x, 16
-; CHECK-NEXT: [[TMP_3:%.*]] = ashr exact i32 [[SEXT]], 16
-; CHECK-NEXT: ret i32 [[TMP_3]]
+define i32 @sextinreg(i32 %x) {
+; CHECK-LABEL: @sextinreg(
+; CHECK-NEXT: [[SEXT:%.*]] = shl i32 [[X:%.*]], 16
+; CHECK-NEXT: [[T3:%.*]] = ashr exact i32 [[SEXT]], 16
+; CHECK-NEXT: ret i32 [[T3]]
;
- %tmp.1 = and i32 %x, 65535
- %tmp.2 = xor i32 %tmp.1, -32768
- %tmp.3 = add i32 %tmp.2, 32768
- ret i32 %tmp.3
+ %t1 = and i32 %x, 65535
+ %t2 = xor i32 %t1, -32768
+ %t3 = add i32 %t2, 32768
+ ret i32 %t3
}
-define i32 @test2(i32 %x) {
-; CHECK-LABEL: @test2(
-; CHECK-NEXT: [[SEXT:%.*]] = shl i32 %x, 16
-; CHECK-NEXT: [[TMP_3:%.*]] = ashr exact i32 [[SEXT]], 16
-; CHECK-NEXT: ret i32 [[TMP_3]]
+define <2 x i32> @sextinreg_splat(<2 x i32> %x) {
+; CHECK-LABEL: @sextinreg_splat(
+; CHECK-NEXT: [[T1:%.*]] = and <2 x i32> [[X:%.*]], <i32 65535, i32 65535>
+; CHECK-NEXT: [[T2:%.*]] = xor <2 x i32> [[T1]], <i32 -32768, i32 -32768>
+; CHECK-NEXT: [[T3:%.*]] = add nsw <2 x i32> [[T2]], <i32 32768, i32 32768>
+; CHECK-NEXT: ret <2 x i32> [[T3]]
;
- %tmp.1 = and i32 %x, 65535
- %tmp.2 = xor i32 %tmp.1, 32768
- %tmp.3 = add i32 %tmp.2, -32768
- ret i32 %tmp.3
+ %t1 = and <2 x i32> %x, <i32 65535, i32 65535>
+ %t2 = xor <2 x i32> %t1, <i32 -32768, i32 -32768>
+ %t3 = add <2 x i32> %t2, <i32 32768, i32 32768>
+ ret <2 x i32> %t3
}
-define i32 @test3(i16 %P) {
-; CHECK-LABEL: @test3(
-; CHECK-NEXT: [[TMP_5:%.*]] = sext i16 %P to i32
-; CHECK-NEXT: ret i32 [[TMP_5]]
+define i32 @sextinreg_alt(i32 %x) {
+; CHECK-LABEL: @sextinreg_alt(
+; CHECK-NEXT: [[SEXT:%.*]] = shl i32 [[X:%.*]], 16
+; CHECK-NEXT: [[T3:%.*]] = ashr exact i32 [[SEXT]], 16
+; CHECK-NEXT: ret i32 [[T3]]
;
- %tmp.1 = zext i16 %P to i32
- %tmp.4 = xor i32 %tmp.1, 32768
- %tmp.5 = add i32 %tmp.4, -32768
- ret i32 %tmp.5
+ %t1 = and i32 %x, 65535
+ %t2 = xor i32 %t1, 32768
+ %t3 = add i32 %t2, -32768
+ ret i32 %t3
}
-define i32 @test4(i32 %x) {
-; CHECK-LABEL: @test4(
-; CHECK-NEXT: [[SEXT:%.*]] = shl i32 %x, 24
-; CHECK-NEXT: [[TMP_3:%.*]] = ashr exact i32 [[SEXT]], 24
-; CHECK-NEXT: ret i32 [[TMP_3]]
+define <2 x i32> @sextinreg_alt_splat(<2 x i32> %x) {
+; CHECK-LABEL: @sextinreg_alt_splat(
+; CHECK-NEXT: [[T1:%.*]] = and <2 x i32> [[X:%.*]], <i32 65535, i32 65535>
+; CHECK-NEXT: [[T2:%.*]] = xor <2 x i32> [[T1]], <i32 32768, i32 32768>
+; CHECK-NEXT: [[T3:%.*]] = add nsw <2 x i32> [[T2]], <i32 -32768, i32 -32768>
+; CHECK-NEXT: ret <2 x i32> [[T3]]
;
- %tmp.1 = and i32 %x, 255
- %tmp.2 = xor i32 %tmp.1, 128
- %tmp.3 = add i32 %tmp.2, -128
- ret i32 %tmp.3
+ %t1 = and <2 x i32> %x, <i32 65535, i32 65535>
+ %t2 = xor <2 x i32> %t1, <i32 32768, i32 32768>
+ %t3 = add <2 x i32> %t2, <i32 -32768, i32 -32768>
+ ret <2 x i32> %t3
+}
+
+define i32 @sext(i16 %P) {
+; CHECK-LABEL: @sext(
+; CHECK-NEXT: [[T5:%.*]] = sext i16 [[P:%.*]] to i32
+; CHECK-NEXT: ret i32 [[T5]]
+;
+ %t1 = zext i16 %P to i32
+ %t4 = xor i32 %t1, 32768
+ %t5 = add i32 %t4, -32768
+ ret i32 %t5
+}
+
+define <2 x i32> @sext_splat(<2 x i16> %P) {
+; CHECK-LABEL: @sext_splat(
+; CHECK-NEXT: [[T5:%.*]] = sext <2 x i16> [[P:%.*]] to <2 x i32>
+; CHECK-NEXT: ret <2 x i32> [[T5]]
+;
+ %t1 = zext <2 x i16> %P to <2 x i32>
+ %t4 = xor <2 x i32> %t1, <i32 32768, i32 32768>
+ %t5 = add <2 x i32> %t4, <i32 -32768, i32 -32768>
+ ret <2 x i32> %t5
+}
+
+define i32 @sextinreg2(i32 %x) {
+; CHECK-LABEL: @sextinreg2(
+; CHECK-NEXT: [[SEXT:%.*]] = shl i32 [[X:%.*]], 24
+; CHECK-NEXT: [[T3:%.*]] = ashr exact i32 [[SEXT]], 24
+; CHECK-NEXT: ret i32 [[T3]]
+;
+ %t1 = and i32 %x, 255
+ %t2 = xor i32 %t1, 128
+ %t3 = add i32 %t2, -128
+ ret i32 %t3
+}
+
+define <2 x i32> @sextinreg2_splat(<2 x i32> %x) {
+; CHECK-LABEL: @sextinreg2_splat(
+; CHECK-NEXT: [[T1:%.*]] = and <2 x i32> [[X:%.*]], <i32 255, i32 255>
+; CHECK-NEXT: [[T2:%.*]] = xor <2 x i32> [[T1]], <i32 128, i32 128>
+; CHECK-NEXT: [[T3:%.*]] = add nsw <2 x i32> [[T2]], <i32 -128, i32 -128>
+; CHECK-NEXT: ret <2 x i32> [[T3]]
+;
+ %t1 = and <2 x i32> %x, <i32 255, i32 255>
+ %t2 = xor <2 x i32> %t1, <i32 128, i32 128>
+ %t3 = add <2 x i32> %t2, <i32 -128, i32 -128>
+ ret <2 x i32> %t3
}
define i32 @test5(i32 %x) {
; CHECK-LABEL: @test5(
-; CHECK-NEXT: [[TMP_2:%.*]] = shl i32 %x, 16
-; CHECK-NEXT: [[TMP_4:%.*]] = ashr exact i32 [[TMP_2]], 16
-; CHECK-NEXT: ret i32 [[TMP_4]]
+; CHECK-NEXT: [[T2:%.*]] = shl i32 [[X:%.*]], 16
+; CHECK-NEXT: [[T4:%.*]] = ashr exact i32 [[T2]], 16
+; CHECK-NEXT: ret i32 [[T4]]
;
- %tmp.2 = shl i32 %x, 16
- %tmp.4 = ashr i32 %tmp.2, 16
- ret i32 %tmp.4
+ %t2 = shl i32 %x, 16
+ %t4 = ashr i32 %t2, 16
+ ret i32 %t4
}
; If the shift amount equals the difference in width of the destination
@@ -67,20 +117,20 @@ define i32 @test5(i32 %x) {
define i32 @test6(i16 %P) {
; CHECK-LABEL: @test6(
-; CHECK-NEXT: [[TMP_5:%.*]] = sext i16 %P to i32
-; CHECK-NEXT: ret i32 [[TMP_5]]
+; CHECK-NEXT: [[T5:%.*]] = sext i16 [[P:%.*]] to i32
+; CHECK-NEXT: ret i32 [[T5]]
;
- %tmp.1 = zext i16 %P to i32
- %sext1 = shl i32 %tmp.1, 16
- %tmp.5 = ashr i32 %sext1, 16
- ret i32 %tmp.5
+ %t1 = zext i16 %P to i32
+ %sext1 = shl i32 %t1, 16
+ %t5 = ashr i32 %sext1, 16
+ ret i32 %t5
}
; Vectors should get the same fold as above.
define <2 x i32> @test6_splat_vec(<2 x i12> %P) {
; CHECK-LABEL: @test6_splat_vec(
-; CHECK-NEXT: [[ASHR:%.*]] = sext <2 x i12> %P to <2 x i32>
+; CHECK-NEXT: [[ASHR:%.*]] = sext <2 x i12> [[P:%.*]] to <2 x i32>
; CHECK-NEXT: ret <2 x i32> [[ASHR]]
;
%z = zext <2 x i12> %P to <2 x i32>
@@ -89,9 +139,9 @@ define <2 x i32> @test6_splat_vec(<2 x i12> %P) {
ret <2 x i32> %ashr
}
-define i32 @test7(i32 %x) {
-; CHECK-LABEL: @test7(
-; CHECK-NEXT: [[SUB:%.*]] = ashr i32 %x, 5
+define i32 @ashr(i32 %x) {
+; CHECK-LABEL: @ashr(
+; CHECK-NEXT: [[SUB:%.*]] = ashr i32 [[X:%.*]], 5
; CHECK-NEXT: ret i32 [[SUB]]
;
%shr = lshr i32 %x, 5
@@ -100,3 +150,15 @@ define i32 @test7(i32 %x) {
ret i32 %sub
}
+define <2 x i32> @ashr_splat(<2 x i32> %x) {
+; CHECK-LABEL: @ashr_splat(
+; CHECK-NEXT: [[SHR:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 5, i32 5>
+; CHECK-NEXT: [[XOR:%.*]] = xor <2 x i32> [[SHR]], <i32 67108864, i32 67108864>
+; CHECK-NEXT: [[SUB:%.*]] = add nsw <2 x i32> [[XOR]], <i32 -67108864, i32 -67108864>
+; CHECK-NEXT: ret <2 x i32> [[SUB]]
+;
+ %shr = lshr <2 x i32> %x, <i32 5, i32 5>
+ %xor = xor <2 x i32> %shr, <i32 67108864, i32 67108864>
+ %sub = add <2 x i32> %xor, <i32 -67108864, i32 -67108864>
+ ret <2 x i32> %sub
+}
More information about the llvm-commits
mailing list