[llvm] ef78eda - [SLP][NFC] Add a test with an optimizable `and` and a final ext.

Alexey Bataev via llvm-commits llvm-commits at lists.llvm.org
Mon Apr 29 07:46:30 PDT 2024


Author: Alexey Bataev
Date: 2024-04-29T07:46:21-07:00
New Revision: ef78edafabe72fac1dad1675a5a25d7e2d62bb2c

URL: https://github.com/llvm/llvm-project/commit/ef78edafabe72fac1dad1675a5a25d7e2d62bb2c
DIFF: https://github.com/llvm/llvm-project/commit/ef78edafabe72fac1dad1675a5a25d7e2d62bb2c.diff

LOG: [SLP][NFC] Add a test with an optimizable `and` and a final ext.

Added: 
    llvm/test/Transforms/SLPVectorizer/RISCV/minbw-with-and-and-scalar-trunc.ll

Modified: 
    

Removed: 
    


################################################################################
diff  --git a/llvm/test/Transforms/SLPVectorizer/RISCV/minbw-with-and-and-scalar-trunc.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/minbw-with-and-and-scalar-trunc.ll
new file mode 100644
index 00000000000000..fc977585614baf
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/minbw-with-and-and-scalar-trunc.ll
@@ -0,0 +1,44 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S --passes=slp-vectorizer -mtriple=riscv64-unknown-linux-gnu -mattr=+v < %s | FileCheck %s
+@c = global [12 x i64] zeroinitializer
+
+; FIXME: after minbitwidth analysis and the i32 conversion, the 65535 mask is
+; transformed to and <4 x i16> ..., -1, which is a no-op and must be dropped.
+; FIXME: need to adjust the cost of the final transformation, since the user is
+; just a trunc to i16 (it must be free).
+define i16 @test() {
+; CHECK-LABEL: define i16 @test(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i64> @llvm.experimental.vp.strided.load.v4i64.p0.i64(ptr align 8 @c, i64 24, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, i32 4)
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <4 x i64> [[TMP0]] to <4 x i16>
+; CHECK-NEXT:    [[TMP2:%.*]] = and <4 x i16> [[TMP1]], <i16 -1, i16 -1, i16 -1, i16 -1>
+; CHECK-NEXT:    [[TMP3:%.*]] = xor <4 x i16> [[TMP2]], <i16 -1, i16 -1, i16 -1, i16 -1>
+; CHECK-NEXT:    [[TMP4:%.*]] = call i16 @llvm.vector.reduce.umax.v4i16(<4 x i16> [[TMP3]])
+; CHECK-NEXT:    [[TMP5:%.*]] = zext i16 [[TMP4]] to i32
+; CHECK-NEXT:    [[T:%.*]] = trunc i32 [[TMP5]] to i16
+; CHECK-NEXT:    ret i16 [[T]]
+;
+entry:
+  %0 = load i64, ptr @c, align 8
+  %conv = trunc i64 %0 to i32
+  %conv3 = and i32 %conv, 65535
+  %conv4 = xor i32 %conv3, 65535
+  %1 = load i64, ptr getelementptr inbounds ([12 x i64], ptr @c, i64 0, i64 3), align 8
+  %conv.1 = trunc i64 %1 to i32
+  %conv3.1 = and i32 %conv.1, 65535
+  %conv4.1 = xor i32 %conv3.1, 65535
+  %.conv4.1 = tail call i32 @llvm.umax.i32(i32 %conv4, i32 %conv4.1)
+  %2 = load i64, ptr getelementptr inbounds ([12 x i64], ptr @c, i64 0, i64 6), align 8
+  %conv.2 = trunc i64 %2 to i32
+  %conv3.2 = and i32 %conv.2, 65535
+  %conv4.2 = xor i32 %conv3.2, 65535
+  %.conv4.2 = tail call i32 @llvm.umax.i32(i32 %.conv4.1, i32 %conv4.2)
+  %3 = load i64, ptr getelementptr inbounds ([12 x i64], ptr @c, i64 0, i64 9), align 8
+  %conv.3 = trunc i64 %3 to i32
+  %conv3.3 = and i32 %conv.3, 65535
+  %conv4.3 = xor i32 %conv3.3, 65535
+  %.conv4.3 = tail call i32 @llvm.umax.i32(i32 %.conv4.2, i32 %conv4.3)
+  %t = trunc i32 %.conv4.3 to i16
+  ret i16 %t
+}
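
For reference (not part of the commit): once both FIXMEs above are addressed, i.e. the all-ones `and` is dropped and the zext whose only user is a trunc to i16 is treated as free and folded away, the vectorized body could reduce to something like the sketch below. This is a hand-written illustration derived from the CHECK lines, not output of the current pass; value names are arbitrary.

; Hypothetical ideal output for the test above (sketch only). Assumes the same
; @c global as in the test.
@c = global [12 x i64] zeroinitializer

define i16 @test() {
entry:
  ; Strided load of the four i64 elements, narrowed straight to i16 lanes.
  %0 = call <4 x i64> @llvm.experimental.vp.strided.load.v4i64.p0.i64(ptr align 8 @c, i64 24, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, i32 4)
  %1 = trunc <4 x i64> %0 to <4 x i16>
  ; Bitwise-not directly on the i16 lanes; the redundant all-ones 'and' mask
  ; and the scalar zext/trunc round-trip from the current output are gone.
  %2 = xor <4 x i16> %1, <i16 -1, i16 -1, i16 -1, i16 -1>
  %3 = call i16 @llvm.vector.reduce.umax.v4i16(<4 x i16> %2)
  ret i16 %3
}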