[llvm] SLP/RISCV: add test for vectorized ctpop, like in X86 (PR #65330)

Ramkumar Ramachandra via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 7 06:56:07 PDT 2023


https://github.com/artagnon updated https://github.com/llvm/llvm-project/pull/65330:

From e21cfd683f22c84b2832ba953c10467e3a0e56a0 Mon Sep 17 00:00:00 2001
From: Ramkumar Ramachandra <Ramkumar.Ramachandra at imgtec.com>
Date: Tue, 5 Sep 2023 15:19:28 +0100
Subject: [PATCH] SLP/RISCV: add test for vectorized ctpop, like in X86

Recently, 7f26c27 turned on the SLP vectorizer by default for RISC-V.
Although there are quite a few SLP tests under the X86/ target, it is
unclear whether the same constructs would be vectorized on RISC-V. This
patch takes a step toward remedying that: since ctpop is often
vectorized on RISC-V, add four ctpop tests covering different integer
widths.
---
 .../Transforms/SLPVectorizer/RISCV/ctpop.ll   | 110 ++++++++++++++++++
 1 file changed, 110 insertions(+)
 create mode 100644 llvm/test/Transforms/SLPVectorizer/RISCV/ctpop.ll

diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/ctpop.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/ctpop.ll
new file mode 100644
index 00000000000000..fa6c83204fbd3f
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/ctpop.ll
@@ -0,0 +1,110 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
+; RUN: opt < %s -passes=slp-vectorizer -S -mtriple=riscv32 -mattr=+m,+v | FileCheck %s
+; RUN: opt < %s -passes=slp-vectorizer -S -mtriple=riscv64 -mattr=+m,+v | FileCheck %s
+; RUN: opt < %s -passes=slp-vectorizer -S -mtriple=riscv32 -mattr=+v,+experimental-zvbb | FileCheck %s
+; RUN: opt < %s -passes=slp-vectorizer -S -mtriple=riscv64 -mattr=+v,+experimental-zvbb | FileCheck %s
+
+define <4 x i8> @ctpop_v4i8(ptr %a) {
+; CHECK-LABEL: define <4 x i8> @ctpop_v4i8
+; CHECK-SAME: (ptr [[A:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i8>, ptr [[A]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i8> @llvm.ctpop.v4i8(<4 x i8> [[TMP0]])
+; CHECK-NEXT:    ret <4 x i8> [[TMP1]]
+;
+entry:
+  %0 = load <4 x i8>, ptr %a
+  %vecext = extractelement <4 x i8> %0, i8 0
+  %1 = call i8 @llvm.ctpop.i8(i8 %vecext)
+  %vecins = insertelement <4 x i8> undef, i8 %1, i8 0
+  %vecext.1 = extractelement <4 x i8> %0, i8 1
+  %2 = call i8 @llvm.ctpop.i8(i8 %vecext.1)
+  %vecins.1 = insertelement <4 x i8> %vecins, i8 %2, i8 1
+  %vecext.2 = extractelement <4 x i8> %0, i8 2
+  %3 = call i8 @llvm.ctpop.i8(i8 %vecext.2)
+  %vecins.2 = insertelement <4 x i8> %vecins.1, i8 %3, i8 2
+  %vecext.3 = extractelement <4 x i8> %0, i8 3
+  %4 = call i8 @llvm.ctpop.i8(i8 %vecext.3)
+  %vecins.3 = insertelement <4 x i8> %vecins.2, i8 %4, i8 3
+  ret <4 x i8> %vecins.3
+}
+
+define <4 x i16> @ctpop_v4i16(ptr %a) {
+; CHECK-LABEL: define <4 x i16> @ctpop_v4i16
+; CHECK-SAME: (ptr [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i16>, ptr [[A]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i16> @llvm.ctpop.v4i16(<4 x i16> [[TMP0]])
+; CHECK-NEXT:    ret <4 x i16> [[TMP1]]
+;
+entry:
+  %0 = load <4 x i16>, ptr %a
+  %vecext = extractelement <4 x i16> %0, i16 0
+  %1 = call i16 @llvm.ctpop.i16(i16 %vecext)
+  %vecins = insertelement <4 x i16> undef, i16 %1, i16 0
+  %vecext.1 = extractelement <4 x i16> %0, i16 1
+  %2 = call i16 @llvm.ctpop.i16(i16 %vecext.1)
+  %vecins.1 = insertelement <4 x i16> %vecins, i16 %2, i16 1
+  %vecext.2 = extractelement <4 x i16> %0, i16 2
+  %3 = call i16 @llvm.ctpop.i16(i16 %vecext.2)
+  %vecins.2 = insertelement <4 x i16> %vecins.1, i16 %3, i16 2
+  %vecext.3 = extractelement <4 x i16> %0, i16 3
+  %4 = call i16 @llvm.ctpop.i16(i16 %vecext.3)
+  %vecins.3 = insertelement <4 x i16> %vecins.2, i16 %4, i16 3
+  ret <4 x i16> %vecins.3
+}
+
+define <4 x i32> @ctpop_v4i32(ptr %a) {
+; CHECK-LABEL: define <4 x i32> @ctpop_v4i32
+; CHECK-SAME: (ptr [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[A]], align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> [[TMP0]])
+; CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+;
+entry:
+  %0 = load <4 x i32>, ptr %a
+  %vecext = extractelement <4 x i32> %0, i32 0
+  %1 = call i32 @llvm.ctpop.i32(i32 %vecext)
+  %vecins = insertelement <4 x i32> undef, i32 %1, i32 0
+  %vecext.1 = extractelement <4 x i32> %0, i32 1
+  %2 = call i32 @llvm.ctpop.i32(i32 %vecext.1)
+  %vecins.1 = insertelement <4 x i32> %vecins, i32 %2, i32 1
+  %vecext.2 = extractelement <4 x i32> %0, i32 2
+  %3 = call i32 @llvm.ctpop.i32(i32 %vecext.2)
+  %vecins.2 = insertelement <4 x i32> %vecins.1, i32 %3, i32 2
+  %vecext.3 = extractelement <4 x i32> %0, i32 3
+  %4 = call i32 @llvm.ctpop.i32(i32 %vecext.3)
+  %vecins.3 = insertelement <4 x i32> %vecins.2, i32 %4, i32 3
+  ret <4 x i32> %vecins.3
+}
+
+define <4 x i64> @ctpop_v4i64(ptr %a) {
+; CHECK-LABEL: define <4 x i64> @ctpop_v4i64
+; CHECK-SAME: (ptr [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i64>, ptr [[A]], align 32
+; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> [[TMP0]])
+; CHECK-NEXT:    ret <4 x i64> [[TMP1]]
+;
+entry:
+  %0 = load <4 x i64>, ptr %a
+  %vecext = extractelement <4 x i64> %0, i32 0
+  %1 = call i64 @llvm.ctpop.i64(i64 %vecext)
+  %vecins = insertelement <4 x i64> undef, i64 %1, i64 0
+  %vecext.1 = extractelement <4 x i64> %0, i32 1
+  %2 = call i64 @llvm.ctpop.i64(i64 %vecext.1)
+  %vecins.1 = insertelement <4 x i64> %vecins, i64 %2, i64 1
+  %vecext.2 = extractelement <4 x i64> %0, i32 2
+  %3 = call i64 @llvm.ctpop.i64(i64 %vecext.2)
+  %vecins.2 = insertelement <4 x i64> %vecins.1, i64 %3, i64 2
+  %vecext.3 = extractelement <4 x i64> %0, i32 3
+  %4 = call i64 @llvm.ctpop.i64(i64 %vecext.3)
+  %vecins.3 = insertelement <4 x i64> %vecins.2, i64 %4, i64 3
+  ret <4 x i64> %vecins.3
+}
+
+declare i8 @llvm.ctpop.i8(i8)
+declare i16 @llvm.ctpop.i16(i16)
+declare i32 @llvm.ctpop.i32(i32)
+declare i64 @llvm.ctpop.i64(i64)
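
For reference, a minimal sketch of how this test could be run and its
autogenerated assertions refreshed from an LLVM checkout (assuming an
existing build tree in build/ at the repository root; adjust the paths
for your setup):

  # Run only this test through lit:
  build/bin/llvm-lit -v llvm/test/Transforms/SLPVectorizer/RISCV/ctpop.ll

  # Regenerate the autogenerated CHECK lines after editing the test:
  llvm/utils/update_test_checks.py --opt-binary=build/bin/opt \
      llvm/test/Transforms/SLPVectorizer/RISCV/ctpop.ll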


