[llvm] 33b1be5 - [riscv] add test coverage for fractional lmul w/fixed length vectorization
Philip Reames via llvm-commits
llvm-commits at lists.llvm.org
Tue May 31 10:25:43 PDT 2022
Author: Philip Reames
Date: 2022-05-31T10:25:37-07:00
New Revision: 33b1be5916669a74b3dc11b9f30f1ddb12270a2e
URL: https://github.com/llvm/llvm-project/commit/33b1be5916669a74b3dc11b9f30f1ddb12270a2e
DIFF: https://github.com/llvm/llvm-project/commit/33b1be5916669a74b3dc11b9f30f1ddb12270a2e.diff
LOG: [riscv] add test coverage for fractional lmul w/fixed length vectorization
Added:
Modified:
llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
index 15716a8ec6e7..0a3772e6799e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -4281,3 +4281,255 @@ vector.body: ; preds = %vector.body, %entry
for.cond.cleanup: ; preds = %vector.body
ret void
}
+
+define void @sink_splat_mul_lmulmf2(i64* nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_mul_lmulmf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT: .LBB81_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmul.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, -4
+; CHECK-NEXT: addi a0, a0, 32
+; CHECK-NEXT: bnez a2, .LBB81_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <2 x i32> poison, i32 %x, i64 0
+ %broadcast.splat = shufflevector <2 x i32> %broadcast.splatinsert, <2 x i32> poison, <2 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i64, i64* %a, i64 %index
+ %1 = bitcast i64* %0 to <2 x i32>*
+ %wide.load = load <2 x i32>, <2 x i32>* %1, align 8
+ %2 = mul <2 x i32> %wide.load, %broadcast.splat
+ %3 = bitcast i64* %0 to <2 x i32>*
+ store <2 x i32> %2, <2 x i32>* %3, align 8
+ %index.next = add nuw i64 %index, 4
+ %4 = icmp eq i64 %index.next, 1024
+ br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_add_lmulmf2(i64* nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_add_lmulmf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT: .LBB82_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vadd.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, -4
+; CHECK-NEXT: addi a0, a0, 32
+; CHECK-NEXT: bnez a2, .LBB82_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <2 x i32> poison, i32 %x, i64 0
+ %broadcast.splat = shufflevector <2 x i32> %broadcast.splatinsert, <2 x i32> poison, <2 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i64, i64* %a, i64 %index
+ %1 = bitcast i64* %0 to <2 x i32>*
+ %wide.load = load <2 x i32>, <2 x i32>* %1, align 8
+ %2 = add <2 x i32> %wide.load, %broadcast.splat
+ %3 = bitcast i64* %0 to <2 x i32>*
+ store <2 x i32> %2, <2 x i32>* %3, align 8
+ %index.next = add nuw i64 %index, 4
+ %4 = icmp eq i64 %index.next, 1024
+ br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_sub_lmulmf2(i64* nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_sub_lmulmf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT: .LBB83_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsub.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, -4
+; CHECK-NEXT: addi a0, a0, 32
+; CHECK-NEXT: bnez a2, .LBB83_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <2 x i32> poison, i32 %x, i64 0
+ %broadcast.splat = shufflevector <2 x i32> %broadcast.splatinsert, <2 x i32> poison, <2 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i64, i64* %a, i64 %index
+ %1 = bitcast i64* %0 to <2 x i32>*
+ %wide.load = load <2 x i32>, <2 x i32>* %1, align 8
+ %2 = sub <2 x i32> %wide.load, %broadcast.splat
+ %3 = bitcast i64* %0 to <2 x i32>*
+ store <2 x i32> %2, <2 x i32>* %3, align 8
+ %index.next = add nuw i64 %index, 4
+ %4 = icmp eq i64 %index.next, 1024
+ br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_rsub_lmulmf2(i64* nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_rsub_lmulmf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT: .LBB84_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vrsub.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, -4
+; CHECK-NEXT: addi a0, a0, 32
+; CHECK-NEXT: bnez a2, .LBB84_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <2 x i32> poison, i32 %x, i64 0
+ %broadcast.splat = shufflevector <2 x i32> %broadcast.splatinsert, <2 x i32> poison, <2 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i64, i64* %a, i64 %index
+ %1 = bitcast i64* %0 to <2 x i32>*
+ %wide.load = load <2 x i32>, <2 x i32>* %1, align 8
+ %2 = sub <2 x i32> %broadcast.splat, %wide.load
+ %3 = bitcast i64* %0 to <2 x i32>*
+ store <2 x i32> %2, <2 x i32>* %3, align 8
+ %index.next = add nuw i64 %index, 4
+ %4 = icmp eq i64 %index.next, 1024
+ br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_and_lmulmf2(i64* nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_and_lmulmf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT: .LBB85_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, -4
+; CHECK-NEXT: addi a0, a0, 32
+; CHECK-NEXT: bnez a2, .LBB85_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <2 x i32> poison, i32 %x, i64 0
+ %broadcast.splat = shufflevector <2 x i32> %broadcast.splatinsert, <2 x i32> poison, <2 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i64, i64* %a, i64 %index
+ %1 = bitcast i64* %0 to <2 x i32>*
+ %wide.load = load <2 x i32>, <2 x i32>* %1, align 8
+ %2 = and <2 x i32> %wide.load, %broadcast.splat
+ %3 = bitcast i64* %0 to <2 x i32>*
+ store <2 x i32> %2, <2 x i32>* %3, align 8
+ %index.next = add nuw i64 %index, 4
+ %4 = icmp eq i64 %index.next, 1024
+ br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_or_lmulmf2(i64* nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_or_lmulmf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT: .LBB86_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vor.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, -4
+; CHECK-NEXT: addi a0, a0, 32
+; CHECK-NEXT: bnez a2, .LBB86_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <2 x i32> poison, i32 %x, i64 0
+ %broadcast.splat = shufflevector <2 x i32> %broadcast.splatinsert, <2 x i32> poison, <2 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i64, i64* %a, i64 %index
+ %1 = bitcast i64* %0 to <2 x i32>*
+ %wide.load = load <2 x i32>, <2 x i32>* %1, align 8
+ %2 = or <2 x i32> %wide.load, %broadcast.splat
+ %3 = bitcast i64* %0 to <2 x i32>*
+ store <2 x i32> %2, <2 x i32>* %3, align 8
+ %index.next = add nuw i64 %index, 4
+ %4 = icmp eq i64 %index.next, 1024
+ br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_xor_lmulmf2(i64* nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_xor_lmulmf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT: .LBB87_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vxor.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, -4
+; CHECK-NEXT: addi a0, a0, 32
+; CHECK-NEXT: bnez a2, .LBB87_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <2 x i32> poison, i32 %x, i64 0
+ %broadcast.splat = shufflevector <2 x i32> %broadcast.splatinsert, <2 x i32> poison, <2 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i64, i64* %a, i64 %index
+ %1 = bitcast i64* %0 to <2 x i32>*
+ %wide.load = load <2 x i32>, <2 x i32>* %1, align 8
+ %2 = xor <2 x i32> %wide.load, %broadcast.splat
+ %3 = bitcast i64* %0 to <2 x i32>*
+ store <2 x i32> %2, <2 x i32>* %3, align 8
+ %index.next = add nuw i64 %index, 4
+ %4 = icmp eq i64 %index.next, 1024
+ br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
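
The new functions above all follow the same pattern: a fixed-length <2 x i32> loop body whose splatted scalar operand should be sunk into the loop so the backend can pick the scalar-operand (.vx) instruction forms at a fractional LMUL (e32/mf2 in the CHECK lines). As a rough, non-authoritative sketch, one plausible scalar C loop whose fixed-length vectorization would produce IR shaped like these tests is shown below; the function name, the i64*-based stride, and the pointer casts are illustrative assumptions, not taken from the commit.

    /* Illustrative sketch only: a scalar loop whose VF=2, i32-element
     * vectorization matches the shape of the IR above.  Striding over a
     * long* while touching the first two i32 lanes mirrors the i64 GEP
     * plus <2 x i32> load/store in the tests. */
    void sink_splat_mul_lmulmf2(long *a, int x) {
      for (long i = 0; i < 1024; i += 4) {
        int *p = (int *)&a[i];  /* reinterpret the i64 slot as two i32 lanes */
        p[0] *= x;              /* each lane multiplied by the splatted x */
        p[1] *= x;
      }
    }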