[llvm] 5e8cd29 - [RISCV] Add coverage for vector combine reduce(cast x) transformation

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Thu Jul 18 11:39:42 PDT 2024


Author: Philip Reames
Date: 2024-07-18T11:39:25-07:00
New Revision: 5e8cd29d62a72ed18e7bc782554d7f14eccec0ee

URL: https://github.com/llvm/llvm-project/commit/5e8cd29d62a72ed18e7bc782554d7f14eccec0ee
DIFF: https://github.com/llvm/llvm-project/commit/5e8cd29d62a72ed18e7bc782554d7f14eccec0ee.diff

LOG: [RISCV] Add coverage for vector combine reduce(cast x) transformation

This covers both the existing trunc transform (basically checking
that it performs sanely with the RISCV cost model) and a planned
change to handle sext/zext as well.
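
For context, a minimal sketch of the fold being exercised, in illustrative
IR that is not taken from the patch itself. The existing transform rewrites
reduce(trunc(x)) as trunc(reduce(x)) when the cost model says the wider
reduction is cheaper; the planned change would extend this to sext/zext:

    ; Before: truncate each lane, then reduce at the narrow type.
    %tr  = trunc <8 x i64> %a0 to <8 x i32>
    %red = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %tr)

    ; After: reduce at the wide type, then truncate the scalar result.
    ; This is sound for add/mul/and/or/xor, where truncation commutes
    ; with the reduction; the combine only fires when the cost model
    ; says it is cheaper (note the mul test in the diff below, which
    ; is expected to stay unfolded).
    %wide = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %a0)
    %red2 = trunc i64 %wide to i32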

Added: 
    llvm/test/Transforms/VectorCombine/RISCV/vecreduce-of-cast.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/VectorCombine/RISCV/vecreduce-of-cast.ll b/llvm/test/Transforms/VectorCombine/RISCV/vecreduce-of-cast.ll
new file mode 100644
index 0000000000000..9b1aa19f85c21
--- /dev/null
+++ b/llvm/test/Transforms/VectorCombine/RISCV/vecreduce-of-cast.ll
@@ -0,0 +1,126 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=vector-combine -S -mtriple=riscv32 -mattr=+v | FileCheck %s
+; RUN: opt < %s -passes=vector-combine -S -mtriple=riscv64 -mattr=+v | FileCheck %s
+
+;
+; Fold reduce(cast(X)) -> cast(reduce(X)) if more cost efficient
+;
+
+define i32 @reduce_add_trunc_v8i64_to_v8i32(<8 x i64> %a0)  {
+; CHECK-LABEL: @reduce_add_trunc_v8i64_to_v8i32(
+; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[A0:%.*]])
+; CHECK-NEXT:    [[RED:%.*]] = trunc i64 [[TMP1]] to i32
+; CHECK-NEXT:    ret i32 [[RED]]
+;
+  %tr = trunc <8 x i64> %a0 to <8 x i32>
+  %red = tail call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %tr)
+  ret i32 %red
+}
+
+define i16 @reduce_add_trunc_v8i64_to_v8i16(<8 x i64> %a0)  {
+; CHECK-LABEL: @reduce_add_trunc_v8i64_to_v8i16(
+; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[A0:%.*]])
+; CHECK-NEXT:    [[RED:%.*]] = trunc i64 [[TMP1]] to i16
+; CHECK-NEXT:    ret i16 [[RED]]
+;
+  %tr = trunc <8 x i64> %a0 to <8 x i16>
+  %red = tail call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %tr)
+  ret i16 %red
+}
+
+define i8 @reduce_add_trunc_v8i64_to_v8i8(<8 x i64> %a0)  {
+; CHECK-LABEL: @reduce_add_trunc_v8i64_to_v8i8(
+; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[A0:%.*]])
+; CHECK-NEXT:    [[RED:%.*]] = trunc i64 [[TMP1]] to i8
+; CHECK-NEXT:    ret i8 [[RED]]
+;
+  %tr = trunc <8 x i64> %a0 to <8 x i8>
+  %red = tail call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %tr)
+  ret i8 %red
+}
+
+define i8 @reduce_or_trunc_v8i32_i8(<8 x i32> %a0)  {
+; CHECK-LABEL: @reduce_or_trunc_v8i32_i8(
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> [[A0:%.*]])
+; CHECK-NEXT:    [[RED:%.*]] = trunc i32 [[TMP1]] to i8
+; CHECK-NEXT:    ret i8 [[RED]]
+;
+  %tr = trunc <8 x i32> %a0 to <8 x i8>
+  %red = tail call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> %tr)
+  ret i8 %red
+}
+
+define i8 @reduce_xor_trunc_v16i64_i8(<16 x i64> %a0)  {
+; CHECK-LABEL: @reduce_xor_trunc_v16i64_i8(
+; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vector.reduce.xor.v16i64(<16 x i64> [[A0:%.*]])
+; CHECK-NEXT:    [[RED:%.*]] = trunc i64 [[TMP1]] to i8
+; CHECK-NEXT:    ret i8 [[RED]]
+;
+  %tr = trunc <16 x i64> %a0 to <16 x i8>
+  %red = tail call i8 @llvm.vector.reduce.xor.v16i8(<16 x i8> %tr)
+  ret i8 %red
+}
+
+define i16 @reduce_mul_trunc_v8i64_i16(<8 x i64> %a0)  {
+; CHECK-LABEL: @reduce_mul_trunc_v8i64_i16(
+; CHECK-NEXT:    [[TR:%.*]] = trunc <8 x i64> [[A0:%.*]] to <8 x i16>
+; CHECK-NEXT:    [[RED:%.*]] = tail call i16 @llvm.vector.reduce.mul.v8i16(<8 x i16> [[TR]])
+; CHECK-NEXT:    ret i16 [[RED]]
+;
+  %tr = trunc <8 x i64> %a0 to <8 x i16>
+  %red = tail call i16 @llvm.vector.reduce.mul.v8i16(<8 x i16> %tr)
+  ret i16 %red
+}
+
+define i32 @reduce_or_sext_v8i8_to_v8i32(<8 x i8> %a0)  {
+; CHECK-LABEL: @reduce_or_sext_v8i8_to_v8i32(
+; CHECK-NEXT:    [[TR:%.*]] = sext <8 x i8> [[A0:%.*]] to <8 x i32>
+; CHECK-NEXT:    [[RED:%.*]] = tail call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> [[TR]])
+; CHECK-NEXT:    ret i32 [[RED]]
+;
+  %tr = sext <8 x i8> %a0 to <8 x i32>
+  %red = tail call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %tr)
+  ret i32 %red
+}
+
+define i32 @reduce_or_sext_v8i16_to_v8i32(<8 x i16> %a0)  {
+; CHECK-LABEL: @reduce_or_sext_v8i16_to_v8i32(
+; CHECK-NEXT:    [[TR:%.*]] = sext <8 x i16> [[A0:%.*]] to <8 x i32>
+; CHECK-NEXT:    [[RED:%.*]] = tail call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> [[TR]])
+; CHECK-NEXT:    ret i32 [[RED]]
+;
+  %tr = sext <8 x i16> %a0 to <8 x i32>
+  %red = tail call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %tr)
+  ret i32 %red
+}
+
+define i32 @reduce_or_zext_v8i8_to_v8i32(<8 x i8> %a0)  {
+; CHECK-LABEL: @reduce_or_zext_v8i8_to_v8i32(
+; CHECK-NEXT:    [[TR:%.*]] = zext <8 x i8> [[A0:%.*]] to <8 x i32>
+; CHECK-NEXT:    [[RED:%.*]] = tail call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> [[TR]])
+; CHECK-NEXT:    ret i32 [[RED]]
+;
+  %tr = zext <8 x i8> %a0 to <8 x i32>
+  %red = tail call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %tr)
+  ret i32 %red
+}
+
+define i32 @reduce_or_zext_v8i16_to_v8i32(<8 x i16> %a0)  {
+; CHECK-LABEL: @reduce_or_zext_v8i16_to_v8i32(
+; CHECK-NEXT:    [[TR:%.*]] = zext <8 x i16> [[A0:%.*]] to <8 x i32>
+; CHECK-NEXT:    [[RED:%.*]] = tail call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> [[TR]])
+; CHECK-NEXT:    ret i32 [[RED]]
+;
+  %tr = zext <8 x i16> %a0 to <8 x i32>
+  %red = tail call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %tr)
+  ret i32 %red
+}
+
+declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
+declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>)
+declare i8 @llvm.vector.reduce.add.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.or.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.xor.v16i8(<16 x i8>)
+declare i16 @llvm.vector.reduce.and.v16i16(<16 x i16>)
+declare i16 @llvm.vector.reduce.mul.v8i16(<8 x i16>)
+

More information about the llvm-commits mailing list