[llvm] 1062595 - [RISCV][SLP] Add some basic test coverage

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 11 13:05:23 PDT 2022


Author: Philip Reames
Date: 2022-08-11T13:05:14-07:00
New Revision: 1062595808a90d1f3079d142cae351f036d38ebe

URL: https://github.com/llvm/llvm-project/commit/1062595808a90d1f3079d142cae351f036d38ebe
DIFF: https://github.com/llvm/llvm-project/commit/1062595808a90d1f3079d142cae351f036d38ebe.diff

LOG: [RISCV][SLP] Add some basic test coverage

Added: 
    llvm/test/Transforms/SLPVectorizer/RISCV/load-store.ll
    llvm/test/Transforms/SLPVectorizer/RISCV/reductions.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/load-store.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/load-store.ll
new file mode 100644
index 0000000000000..ed520392b69d6
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/load-store.ll
@@ -0,0 +1,63 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -slp-vectorizer -mtriple=riscv64 -mattr=+v \
+; RUN: -riscv-v-vector-bits-min=-1 -S | FileCheck %s --check-prefixes=CHECK
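+; If these assertions go stale, they can be regenerated with something like
+;   llvm/utils/update_test_checks.py --opt-binary=<path-to-opt> \
+;     llvm/test/Transforms/SLPVectorizer/RISCV/load-store.ll
+; (In this configuration, -riscv-v-vector-bits-min=-1 means: derive the
+; minimum VLEN from -mattr instead of pinning a fixed vector width.)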
+
+define void @simple_copy(ptr %dest, ptr %p) {
+; CHECK-LABEL: @simple_copy(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    store <2 x i16> [[TMP0]], ptr [[DEST:%.*]], align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %e0 = load i16, ptr %p, align 4
+  %inc = getelementptr inbounds i16, ptr %p, i64 1
+  %e1 = load i16, ptr %inc, align 2
+
+  store i16 %e0, ptr %dest, align 4
+  %inc2 = getelementptr inbounds i16, ptr %dest, i64 1
+  store i16 %e1, ptr %inc2, align 2
+  ret void
+}
+
+define void @vec_add(ptr %dest, ptr %p) {
+; CHECK-LABEL: @vec_add(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = add <2 x i16> [[TMP0]], <i16 1, i16 1>
+; CHECK-NEXT:    store <2 x i16> [[TMP1]], ptr [[DEST:%.*]], align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %e0 = load i16, ptr %p, align 4
+  %inc = getelementptr inbounds i16, ptr %p, i64 1
+  %e1 = load i16, ptr %inc, align 2
+
+  %a0 = add i16 %e0, 1
+  %a1 = add i16 %e1, 1
+
+  store i16 %a0, ptr %dest, align 4
+  %inc2 = getelementptr inbounds i16, ptr %dest, i64 1
+  store i16 %a1, ptr %inc2, align 2
+  ret void
+}
+
+
+define void @splat_store(ptr %dest, ptr %p) {
+; CHECK-LABEL: @splat_store(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[E0:%.*]] = load i16, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    store i16 [[E0]], ptr [[DEST:%.*]], align 4
+; CHECK-NEXT:    [[INC2:%.*]] = getelementptr inbounds i16, ptr [[DEST]], i64 1
+; CHECK-NEXT:    store i16 [[E0]], ptr [[INC2]], align 2
+; CHECK-NEXT:    ret void
+;
+entry:
+  %e0 = load i16, ptr %p, align 4
+
+  store i16 %e0, ptr %dest, align 4
+  %inc2 = getelementptr inbounds i16, ptr %dest, i64 1
+  store i16 %e0, ptr %inc2, align 2
+  ret void
+}
+

diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/reductions.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/reductions.ll
new file mode 100644
index 0000000000000..2fcd6bdd294bc
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/reductions.ll
@@ -0,0 +1,569 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -slp-vectorizer -mtriple=riscv64 -mattr=+v \
+; RUN: -riscv-v-vector-bits-min=128 -S | FileCheck %s --check-prefixes=CHECK
+; RUN: opt < %s -slp-vectorizer -mtriple=riscv64 -mattr=+v \
+; RUN: -riscv-v-vector-bits-min=256 -S | FileCheck %s --check-prefixes=CHECK
+; RUN: opt < %s -slp-vectorizer -mtriple=riscv64 -mattr=+v \
+; RUN: -riscv-v-vector-bits-min=512 -S | FileCheck %s --check-prefixes=CHECK
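+; All three minimum-VLEN configurations run against the single CHECK prefix:
+; the vectorized output below is expected to be identical whether the backend
+; may assume at least 128-, 256-, or 512-bit vectors.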
+
+target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
+target triple = "riscv64"
+
+; The first batch of tests covers simple reductions of various widths
+
+define i64 @red_ld_2xi64(ptr %ptr) {
+; CHECK-LABEL: @red_ld_2xi64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[LD0:%.*]] = load i64, ptr [[PTR:%.*]], align 8
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i64, ptr [[PTR]], i64 1
+; CHECK-NEXT:    [[LD1:%.*]] = load i64, ptr [[GEP]], align 8
+; CHECK-NEXT:    [[ADD_1:%.*]] = add nuw nsw i64 [[LD0]], [[LD1]]
+; CHECK-NEXT:    ret i64 [[ADD_1]]
+;
+entry:
+  %ld0 = load i64, ptr %ptr
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 1
+  %ld1 = load i64, ptr %gep
+  %add.1 = add nuw nsw i64 %ld0, %ld1
+  ret i64 %add.1
+}
+
+define i64 @red_ld_4xi64(ptr %ptr) {
+; CHECK-LABEL: @red_ld_4xi64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i64>, ptr [[PTR:%.*]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP0]])
+; CHECK-NEXT:    ret i64 [[TMP1]]
+;
+entry:
+  %ld0 = load i64, ptr %ptr
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 1
+  %ld1 = load i64, ptr %gep
+  %add.1 = add nuw nsw i64 %ld0, %ld1
+  %gep.1 = getelementptr inbounds i64, ptr %ptr, i64 2
+  %ld2 = load i64, ptr %gep.1
+  %add.2 = add nuw nsw i64 %add.1, %ld2
+  %gep.2 = getelementptr inbounds i64, ptr %ptr, i64 3
+  %ld3 = load i64, ptr %gep.2
+  %add.3 = add nuw nsw i64 %add.2, %ld3
+  ret i64 %add.3
+}
+
+define i64 @red_ld_8xi64(ptr %ptr) {
+; CHECK-LABEL: @red_ld_8xi64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i64>, ptr [[PTR:%.*]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP0]])
+; CHECK-NEXT:    ret i64 [[TMP1]]
+;
+entry:
+  %ld0 = load i64, ptr %ptr
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 1
+  %ld1 = load i64, ptr %gep
+  %add.1 = add nuw nsw i64 %ld0, %ld1
+  %gep.1 = getelementptr inbounds i64, ptr %ptr, i64 2
+  %ld2 = load i64, ptr %gep.1
+  %add.2 = add nuw nsw i64 %add.1, %ld2
+  %gep.2 = getelementptr inbounds i64, ptr %ptr, i64 3
+  %ld3 = load i64, ptr %gep.2
+  %add.3 = add nuw nsw i64 %add.2, %ld3
+  %gep.3 = getelementptr inbounds i64, ptr %ptr, i64 4
+  %ld4 = load i64, ptr %gep.3
+  %add.4 = add nuw nsw i64 %add.3, %ld4
+  %gep.4 = getelementptr inbounds i64, ptr %ptr, i64 5
+  %ld5 = load i64, ptr %gep.4
+  %add.5 = add nuw nsw i64 %add.4, %ld5
+  %gep.5 = getelementptr inbounds i64, ptr %ptr, i64 6
+  %ld6 = load i64, ptr %gep.5
+  %add.6 = add nuw nsw i64 %add.5, %ld6
+  %gep.6 = getelementptr inbounds i64, ptr %ptr, i64 7
+  %ld7 = load i64, ptr %gep.6
+  %add.7 = add nuw nsw i64 %add.6, %ld7
+  ret i64 %add.7
+}
+
+define i64 @red_ld_16xi64(ptr %ptr) {
+; CHECK-LABEL: @red_ld_16xi64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i64>, ptr [[PTR:%.*]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> [[TMP0]])
+; CHECK-NEXT:    ret i64 [[TMP1]]
+;
+entry:
+  %ld0 = load i64, ptr %ptr
+  %gep = getelementptr inbounds i64, ptr %ptr, i64 1
+  %ld1 = load i64, ptr %gep
+  %add.1 = add nuw nsw i64 %ld0, %ld1
+  %gep.1 = getelementptr inbounds i64, ptr %ptr, i64 2
+  %ld2 = load i64, ptr %gep.1
+  %add.2 = add nuw nsw i64 %add.1, %ld2
+  %gep.2 = getelementptr inbounds i64, ptr %ptr, i64 3
+  %ld3 = load i64, ptr %gep.2
+  %add.3 = add nuw nsw i64 %add.2, %ld3
+  %gep.3 = getelementptr inbounds i64, ptr %ptr, i64 4
+  %ld4 = load i64, ptr %gep.3
+  %add.4 = add nuw nsw i64 %add.3, %ld4
+  %gep.4 = getelementptr inbounds i64, ptr %ptr, i64 5
+  %ld5 = load i64, ptr %gep.4
+  %add.5 = add nuw nsw i64 %add.4, %ld5
+  %gep.5 = getelementptr inbounds i64, ptr %ptr, i64 6
+  %ld6 = load i64, ptr %gep.5
+  %add.6 = add nuw nsw i64 %add.5, %ld6
+  %gep.6 = getelementptr inbounds i64, ptr %ptr, i64 7
+  %ld7 = load i64, ptr %gep.6
+  %add.7 = add nuw nsw i64 %add.6, %ld7
+  %gep.7 = getelementptr inbounds i64, ptr %ptr, i64 8
+  %ld8 = load i64, ptr %gep.7
+  %add.8 = add nuw nsw i64 %add.7, %ld8
+  %gep.8 = getelementptr inbounds i64, ptr %ptr, i64 9
+  %ld9 = load i64, ptr %gep.8
+  %add.9 = add nuw nsw i64 %add.8, %ld9
+  %gep.9 = getelementptr inbounds i64, ptr %ptr, i64 10
+  %ld10 = load i64, ptr %gep.9
+  %add.10 = add nuw nsw i64 %add.9, %ld10
+  %gep.10 = getelementptr inbounds i64, ptr %ptr, i64 11
+  %ld11 = load i64, ptr %gep.10
+  %add.11 = add nuw nsw i64 %add.10, %ld11
+  %gep.11 = getelementptr inbounds i64, ptr %ptr, i64 12
+  %ld12 = load i64, ptr %gep.11
+  %add.12 = add nuw nsw i64 %add.11, %ld12
+  %gep.12 = getelementptr inbounds i64, ptr %ptr, i64 13
+  %ld13 = load i64, ptr %gep.12
+  %add.13 = add nuw nsw i64 %add.12, %ld13
+  %gep.13 = getelementptr inbounds i64, ptr %ptr, i64 14
+  %ld14 = load i64, ptr %gep.13
+  %add.14 = add nuw nsw i64 %add.13, %ld14
+  %gep.14 = getelementptr inbounds i64, ptr %ptr, i64 15
+  %ld15 = load i64, ptr %gep.14
+  %add.15 = add nuw nsw i64 %add.14, %ld15
+  ret i64 %add.15
+}
+
+; The next batch tests different reduction kinds
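+; (and, or, xor, and add, each reducing an element-wise combination of the
+; two input buffers)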
+
+%struct.buf = type { [8 x i8] }
+
+define i8 @reduce_and(ptr %a, ptr %b) {
+; CHECK-LABEL: @reduce_and(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_BUF:%.*]], ptr [[A:%.*]], i64 0, i32 0, i64 0
+; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds [[STRUCT_BUF]], ptr [[B:%.*]], i64 0, i32 0, i64 0
+; CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr [[ARRAYIDX3]], align 1
+; CHECK-NEXT:    [[TMP2:%.*]] = xor <8 x i8> [[TMP1]], [[TMP0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call i8 @llvm.vector.reduce.and.v8i8(<8 x i8> [[TMP2]])
+; CHECK-NEXT:    [[OP_RDX:%.*]] = and i8 [[TMP3]], 1
+; CHECK-NEXT:    ret i8 [[OP_RDX]]
+;
+entry:
+  %arrayidx = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 0
+  %0 = load i8, ptr %arrayidx, align 1
+  %arrayidx3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 0
+  %1 = load i8, ptr %arrayidx3, align 1
+  %xor12 = xor i8 %1, %0
+  %and13 = and i8 %xor12, 1
+  %arrayidx.1 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 1
+  %2 = load i8, ptr %arrayidx.1, align 1
+  %arrayidx3.1 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 1
+  %3 = load i8, ptr %arrayidx3.1, align 1
+  %xor12.1 = xor i8 %3, %2
+  %and13.1 = and i8 %xor12.1, %and13
+  %arrayidx.2 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 2
+  %4 = load i8, ptr %arrayidx.2, align 1
+  %arrayidx3.2 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 2
+  %5 = load i8, ptr %arrayidx3.2, align 1
+  %xor12.2 = xor i8 %5, %4
+  %and13.2 = and i8 %xor12.2, %and13.1
+  %arrayidx.3 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 3
+  %6 = load i8, ptr %arrayidx.3, align 1
+  %arrayidx3.3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 3
+  %7 = load i8, ptr %arrayidx3.3, align 1
+  %xor12.3 = xor i8 %7, %6
+  %and13.3 = and i8 %xor12.3, %and13.2
+  %arrayidx.4 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 4
+  %8 = load i8, ptr %arrayidx.4, align 1
+  %arrayidx3.4 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 4
+  %9 = load i8, ptr %arrayidx3.4, align 1
+  %xor12.4 = xor i8 %9, %8
+  %and13.4 = and i8 %xor12.4, %and13.3
+  %arrayidx.5 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 5
+  %10 = load i8, ptr %arrayidx.5, align 1
+  %arrayidx3.5 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 5
+  %11 = load i8, ptr %arrayidx3.5, align 1
+  %xor12.5 = xor i8 %11, %10
+  %and13.5 = and i8 %xor12.5, %and13.4
+  %arrayidx.6 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 6
+  %12 = load i8, ptr %arrayidx.6, align 1
+  %arrayidx3.6 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 6
+  %13 = load i8, ptr %arrayidx3.6, align 1
+  %xor12.6 = xor i8 %13, %12
+  %and13.6 = and i8 %xor12.6, %and13.5
+  %arrayidx.7 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 7
+  %14 = load i8, ptr %arrayidx.7, align 1
+  %arrayidx3.7 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 7
+  %15 = load i8, ptr %arrayidx3.7, align 1
+  %xor12.7 = xor i8 %15, %14
+  %and13.7 = and i8 %xor12.7, %and13.6
+  ret i8 %and13.7
+}
+
+define i8 @reduce_or(ptr %a, ptr %b) {
+; CHECK-LABEL: @reduce_or(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_BUF:%.*]], ptr [[A:%.*]], i64 0, i32 0, i64 0
+; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds [[STRUCT_BUF]], ptr [[B:%.*]], i64 0, i32 0, i64 0
+; CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr [[ARRAYIDX3]], align 1
+; CHECK-NEXT:    [[TMP2:%.*]] = xor <8 x i8> [[TMP1]], [[TMP0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> [[TMP2]])
+; CHECK-NEXT:    ret i8 [[TMP3]]
+;
+
+entry:
+  %arrayidx = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 0
+  %0 = load i8, ptr %arrayidx, align 1
+  %arrayidx3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 0
+  %1 = load i8, ptr %arrayidx3, align 1
+  %xor12 = xor i8 %1, %0
+  %arrayidx.1 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 1
+  %2 = load i8, ptr %arrayidx.1, align 1
+  %arrayidx3.1 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 1
+  %3 = load i8, ptr %arrayidx3.1, align 1
+  %xor12.1 = xor i8 %3, %2
+  %or13.1 = or i8 %xor12.1, %xor12
+  %arrayidx.2 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 2
+  %4 = load i8, ptr %arrayidx.2, align 1
+  %arrayidx3.2 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 2
+  %5 = load i8, ptr %arrayidx3.2, align 1
+  %xor12.2 = xor i8 %5, %4
+  %or13.2 = or i8 %xor12.2, %or13.1
+  %arrayidx.3 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 3
+  %6 = load i8, ptr %arrayidx.3, align 1
+  %arrayidx3.3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 3
+  %7 = load i8, ptr %arrayidx3.3, align 1
+  %xor12.3 = xor i8 %7, %6
+  %or13.3 = or i8 %xor12.3, %or13.2
+  %arrayidx.4 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 4
+  %8 = load i8, ptr %arrayidx.4, align 1
+  %arrayidx3.4 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 4
+  %9 = load i8, ptr %arrayidx3.4, align 1
+  %xor12.4 = xor i8 %9, %8
+  %or13.4 = or i8 %xor12.4, %or13.3
+  %arrayidx.5 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 5
+  %10 = load i8, ptr %arrayidx.5, align 1
+  %arrayidx3.5 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 5
+  %11 = load i8, ptr %arrayidx3.5, align 1
+  %xor12.5 = xor i8 %11, %10
+  %or13.5 = or i8 %xor12.5, %or13.4
+  %arrayidx.6 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 6
+  %12 = load i8, ptr %arrayidx.6, align 1
+  %arrayidx3.6 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 6
+  %13 = load i8, ptr %arrayidx3.6, align 1
+  %xor12.6 = xor i8 %13, %12
+  %or13.6 = or i8 %xor12.6, %or13.5
+  %arrayidx.7 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 7
+  %14 = load i8, ptr %arrayidx.7, align 1
+  %arrayidx3.7 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 7
+  %15 = load i8, ptr %arrayidx3.7, align 1
+  %xor12.7 = xor i8 %15, %14
+  %or13.7 = or i8 %xor12.7, %or13.6
+  ret i8 %or13.7
+}
+
+define i8 @reduce_xor(ptr %a, ptr %b) {
+; CHECK-LABEL: @reduce_xor(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_BUF:%.*]], ptr [[A:%.*]], i64 0, i32 0, i64 0
+; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds [[STRUCT_BUF]], ptr [[B:%.*]], i64 0, i32 0, i64 0
+; CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr [[ARRAYIDX3]], align 1
+; CHECK-NEXT:    [[TMP2:%.*]] = and <8 x i8> [[TMP1]], [[TMP0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call i8 @llvm.vector.reduce.xor.v8i8(<8 x i8> [[TMP2]])
+; CHECK-NEXT:    [[OP_RDX:%.*]] = xor i8 [[TMP3]], 1
+; CHECK-NEXT:    ret i8 [[OP_RDX]]
+;
+entry:
+  %arrayidx = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 0
+  %0 = load i8, ptr %arrayidx, align 1
+  %arrayidx3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 0
+  %1 = load i8, ptr %arrayidx3, align 1
+  %and12 = and i8 %1, %0
+  %arrayidx.1 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 1
+  %2 = load i8, ptr %arrayidx.1, align 1
+  %arrayidx3.1 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 1
+  %3 = load i8, ptr %arrayidx3.1, align 1
+  %and12.1 = and i8 %3, %2
+  %4 = xor i8 %and12, %and12.1
+  %arrayidx.2 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 2
+  %5 = load i8, ptr %arrayidx.2, align 1
+  %arrayidx3.2 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 2
+  %6 = load i8, ptr %arrayidx3.2, align 1
+  %and12.2 = and i8 %6, %5
+  %7 = xor i8 %4, %and12.2
+  %arrayidx.3 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 3
+  %8 = load i8, ptr %arrayidx.3, align 1
+  %arrayidx3.3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 3
+  %9 = load i8, ptr %arrayidx3.3, align 1
+  %and12.3 = and i8 %9, %8
+  %10 = xor i8 %7, %and12.3
+  %arrayidx.4 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 4
+  %11 = load i8, ptr %arrayidx.4, align 1
+  %arrayidx3.4 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 4
+  %12 = load i8, ptr %arrayidx3.4, align 1
+  %and12.4 = and i8 %12, %11
+  %13 = xor i8 %10, %and12.4
+  %arrayidx.5 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 5
+  %14 = load i8, ptr %arrayidx.5, align 1
+  %arrayidx3.5 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 5
+  %15 = load i8, ptr %arrayidx3.5, align 1
+  %and12.5 = and i8 %15, %14
+  %16 = xor i8 %13, %and12.5
+  %arrayidx.6 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 6
+  %17 = load i8, ptr %arrayidx.6, align 1
+  %arrayidx3.6 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 6
+  %18 = load i8, ptr %arrayidx3.6, align 1
+  %and12.6 = and i8 %18, %17
+  %19 = xor i8 %16, %and12.6
+  %arrayidx.7 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 7
+  %20 = load i8, ptr %arrayidx.7, align 1
+  %arrayidx3.7 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 7
+  %21 = load i8, ptr %arrayidx3.7, align 1
+  %and12.7 = and i8 %21, %20
+  %22 = xor i8 %19, %and12.7
+  %xor13.7 = xor i8 %22, 1
+  ret i8 %xor13.7
+}
+
+
+
+define i8 @reduce_add(ptr %a, ptr %b) {
+; CHECK-LABEL: @reduce_add(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_BUF:%.*]], ptr [[A:%.*]], i64 0, i32 0, i64 0
+; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds [[STRUCT_BUF]], ptr [[B:%.*]], i64 0, i32 0, i64 0
+; CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr [[ARRAYIDX3]], align 1
+; CHECK-NEXT:    [[TMP2:%.*]] = and <8 x i8> [[TMP1]], [[TMP0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> [[TMP2]])
+; CHECK-NEXT:    [[OP_RDX:%.*]] = add i8 [[TMP3]], 1
+; CHECK-NEXT:    ret i8 [[OP_RDX]]
+;
+entry:
+  %arrayidx = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 0
+  %0 = load i8, ptr %arrayidx, align 1
+  %arrayidx3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 0
+  %1 = load i8, ptr %arrayidx3, align 1
+  %and12 = and i8 %1, %0
+  %arrayidx.1 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 1
+  %2 = load i8, ptr %arrayidx.1, align 1
+  %arrayidx3.1 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 1
+  %3 = load i8, ptr %arrayidx3.1, align 1
+  %and12.1 = and i8 %3, %2
+  %4 = add i8 %and12, %and12.1
+  %arrayidx.2 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 2
+  %5 = load i8, ptr %arrayidx.2, align 1
+  %arrayidx3.2 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 2
+  %6 = load i8, ptr %arrayidx3.2, align 1
+  %and12.2 = and i8 %6, %5
+  %7 = add i8 %4, %and12.2
+  %arrayidx.3 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 3
+  %8 = load i8, ptr %arrayidx.3, align 1
+  %arrayidx3.3 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 3
+  %9 = load i8, ptr %arrayidx3.3, align 1
+  %and12.3 = and i8 %9, %8
+  %10 = add i8 %7, %and12.3
+  %arrayidx.4 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 4
+  %11 = load i8, ptr %arrayidx.4, align 1
+  %arrayidx3.4 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 4
+  %12 = load i8, ptr %arrayidx3.4, align 1
+  %and12.4 = and i8 %12, %11
+  %13 = add i8 %10, %and12.4
+  %arrayidx.5 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 5
+  %14 = load i8, ptr %arrayidx.5, align 1
+  %arrayidx3.5 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 5
+  %15 = load i8, ptr %arrayidx3.5, align 1
+  %and12.5 = and i8 %15, %14
+  %16 = add i8 %13, %and12.5
+  %arrayidx.6 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 6
+  %17 = load i8, ptr %arrayidx.6, align 1
+  %arrayidx3.6 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 6
+  %18 = load i8, ptr %arrayidx3.6, align 1
+  %and12.6 = and i8 %18, %17
+  %19 = add i8 %16, %and12.6
+  %arrayidx.7 = getelementptr inbounds %struct.buf, ptr %a, i64 0, i32 0, i64 7
+  %20 = load i8, ptr %arrayidx.7, align 1
+  %arrayidx3.7 = getelementptr inbounds %struct.buf, ptr %b, i64 0, i32 0, i64 7
+  %21 = load i8, ptr %arrayidx3.7, align 1
+  %and12.7 = and i8 %21, %20
+  %22 = add i8 %19, %and12.7
+  %add13.7 = add i8 %22, 1
+  ret i8 %add13.7
+}
+
+
+; The next batch exercises reductions involving zext of narrower loads
+
+define i64 @red_zext_ld_2xi64(ptr %ptr) {
+; CHECK-LABEL: @red_zext_ld_2xi64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[LD0:%.*]] = load i8, ptr [[PTR:%.*]], align 1
+; CHECK-NEXT:    [[ZEXT:%.*]] = zext i8 [[LD0]] to i64
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 1
+; CHECK-NEXT:    [[LD1:%.*]] = load i8, ptr [[GEP]], align 1
+; CHECK-NEXT:    [[ZEXT_1:%.*]] = zext i8 [[LD1]] to i64
+; CHECK-NEXT:    [[ADD_1:%.*]] = add nuw nsw i64 [[ZEXT]], [[ZEXT_1]]
+; CHECK-NEXT:    ret i64 [[ADD_1]]
+;
+entry:
+  %ld0 = load i8, ptr %ptr
+  %zext = zext i8 %ld0 to i64
+  %gep = getelementptr inbounds i8, ptr %ptr, i64 1
+  %ld1 = load i8, ptr %gep
+  %zext.1 = zext i8 %ld1 to i64
+  %add.1 = add nuw nsw i64 %zext, %zext.1
+  ret i64 %add.1
+}
+
+define i64 @red_zext_ld_4xi64(ptr %ptr) {
+; CHECK-LABEL: @red_zext_ld_4xi64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i8>, ptr [[PTR:%.*]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = zext <4 x i8> [[TMP0]] to <4 x i64>
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]])
+; CHECK-NEXT:    ret i64 [[TMP2]]
+;
+entry:
+  %ld0 = load i8, ptr %ptr
+  %zext = zext i8 %ld0 to i64
+  %gep = getelementptr inbounds i8, ptr %ptr, i64 1
+  %ld1 = load i8, ptr %gep
+  %zext.1 = zext i8 %ld1 to i64
+  %add.1 = add nuw nsw i64 %zext, %zext.1
+  %gep.1 = getelementptr inbounds i8, ptr %ptr, i64 2
+  %ld2 = load i8, ptr %gep.1
+  %zext.2 = zext i8 %ld2 to i64
+  %add.2 = add nuw nsw i64 %add.1, %zext.2
+  %gep.2 = getelementptr inbounds i8, ptr %ptr, i64 3
+  %ld3 = load i8, ptr %gep.2
+  %zext.3 = zext i8 %ld3 to i64
+  %add.3 = add nuw nsw i64 %add.2, %zext.3
+  ret i64 %add.3
+}
+
+define i64 @red_zext_ld_8xi64(ptr %ptr) {
+; CHECK-LABEL: @red_zext_ld_8xi64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[PTR:%.*]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = zext <8 x i8> [[TMP0]] to <8 x i64>
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP1]])
+; CHECK-NEXT:    ret i64 [[TMP2]]
+;
+entry:
+  %ld0 = load i8, ptr %ptr
+  %zext = zext i8 %ld0 to i64
+  %gep = getelementptr inbounds i8, ptr %ptr, i64 1
+  %ld1 = load i8, ptr %gep
+  %zext.1 = zext i8 %ld1 to i64
+  %add.1 = add nuw nsw i64 %zext, %zext.1
+  %gep.1 = getelementptr inbounds i8, ptr %ptr, i64 2
+  %ld2 = load i8, ptr %gep.1
+  %zext.2 = zext i8 %ld2 to i64
+  %add.2 = add nuw nsw i64 %add.1, %zext.2
+  %gep.2 = getelementptr inbounds i8, ptr %ptr, i64 3
+  %ld3 = load i8, ptr %gep.2
+  %zext.3 = zext i8 %ld3 to i64
+  %add.3 = add nuw nsw i64 %add.2, %zext.3
+  %gep.3 = getelementptr inbounds i8, ptr %ptr, i64 4
+  %ld4 = load i8, ptr %gep.3
+  %zext.4 = zext i8 %ld4 to i64
+  %add.4 = add nuw nsw i64 %add.3, %zext.4
+  %gep.4 = getelementptr inbounds i8, ptr %ptr, i64 5
+  %ld5 = load i8, ptr %gep.4
+  %zext.5 = zext i8 %ld5 to i64
+  %add.5 = add nuw nsw i64 %add.4, %zext.5
+  %gep.5 = getelementptr inbounds i8, ptr %ptr, i64 6
+  %ld6 = load i8, ptr %gep.5
+  %zext.6 = zext i8 %ld6 to i64
+  %add.6 = add nuw nsw i64 %add.5, %zext.6
+  %gep.6 = getelementptr inbounds i8, ptr %ptr, i64 7
+  %ld7 = load i8, ptr %gep.6
+  %zext.7 = zext i8 %ld7 to i64
+  %add.7 = add nuw nsw i64 %add.6, %zext.7
+  ret i64 %add.7
+}
+
+define i64 @red_zext_ld_16xi64(ptr %ptr) {
+; CHECK-LABEL: @red_zext_ld_16xi64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i8>, ptr [[PTR:%.*]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = zext <16 x i8> [[TMP0]] to <16 x i64>
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> [[TMP1]])
+; CHECK-NEXT:    ret i64 [[TMP2]]
+;
+entry:
+  %ld0 = load i8, ptr %ptr
+  %zext = zext i8 %ld0 to i64
+  %gep = getelementptr inbounds i8, ptr %ptr, i64 1
+  %ld1 = load i8, ptr %gep
+  %zext.1 = zext i8 %ld1 to i64
+  %add.1 = add nuw nsw i64 %zext, %zext.1
+  %gep.1 = getelementptr inbounds i8, ptr %ptr, i64 2
+  %ld2 = load i8, ptr %gep.1
+  %zext.2 = zext i8 %ld2 to i64
+  %add.2 = add nuw nsw i64 %add.1, %zext.2
+  %gep.2 = getelementptr inbounds i8, ptr %ptr, i64 3
+  %ld3 = load i8, ptr %gep.2
+  %zext.3 = zext i8 %ld3 to i64
+  %add.3 = add nuw nsw i64 %add.2, %zext.3
+  %gep.3 = getelementptr inbounds i8, ptr %ptr, i64 4
+  %ld4 = load i8, ptr %gep.3
+  %zext.4 = zext i8 %ld4 to i64
+  %add.4 = add nuw nsw i64 %add.3, %zext.4
+  %gep.4 = getelementptr inbounds i8, ptr %ptr, i64 5
+  %ld5 = load i8, ptr %gep.4
+  %zext.5 = zext i8 %ld5 to i64
+  %add.5 = add nuw nsw i64 %add.4, %zext.5
+  %gep.5 = getelementptr inbounds i8, ptr %ptr, i64 6
+  %ld6 = load i8, ptr %gep.5
+  %zext.6 = zext i8 %ld6 to i64
+  %add.6 = add nuw nsw i64 %add.5, %zext.6
+  %gep.6 = getelementptr inbounds i8, ptr %ptr, i64 7
+  %ld7 = load i8, ptr %gep.6
+  %zext.7 = zext i8 %ld7 to i64
+  %add.7 = add nuw nsw i64 %add.6, %zext.7
+  %gep.7 = getelementptr inbounds i8, ptr %ptr, i64 8
+  %ld8 = load i8, ptr %gep.7
+  %zext.8 = zext i8 %ld8 to i64
+  %add.8 = add nuw nsw i64 %add.7, %zext.8
+  %gep.8 = getelementptr inbounds i8, ptr %ptr, i64 9
+  %ld9 = load i8, ptr %gep.8
+  %zext.9 = zext i8 %ld9 to i64
+  %add.9 = add nuw nsw i64 %add.8, %zext.9
+  %gep.9 = getelementptr inbounds i8, ptr %ptr, i64 10
+  %ld10 = load i8, ptr %gep.9
+  %zext.10 = zext i8 %ld10 to i64
+  %add.10 = add nuw nsw i64 %add.9, %zext.10
+  %gep.10 = getelementptr inbounds i8, ptr %ptr, i64 11
+  %ld11 = load i8, ptr %gep.10
+  %zext.11 = zext i8 %ld11 to i64
+  %add.11 = add nuw nsw i64 %add.10, %zext.11
+  %gep.11 = getelementptr inbounds i8, ptr %ptr, i64 12
+  %ld12 = load i8, ptr %gep.11
+  %zext.12 = zext i8 %ld12 to i64
+  %add.12 = add nuw nsw i64 %add.11, %zext.12
+  %gep.12 = getelementptr inbounds i8, ptr %ptr, i64 13
+  %ld13 = load i8, ptr %gep.12
+  %zext.13 = zext i8 %ld13 to i64
+  %add.13 = add nuw nsw i64 %add.12, %zext.13
+  %gep.13 = getelementptr inbounds i8, ptr %ptr, i64 14
+  %ld14 = load i8, ptr %gep.13
+  %zext.14 = zext i8 %ld14 to i64
+  %add.14 = add nuw nsw i64 %add.13, %zext.14
+  %gep.14 = getelementptr inbounds i8, ptr %ptr, i64 15
+  %ld15 = load i8, ptr %gep.14
+  %zext.15 = zext i8 %ld15 to i64
+  %add.15 = add nuw nsw i64 %add.14, %zext.15
+  ret i64 %add.15
+}
+
+
