[llvm] [RISCV] Support scalable vectors for the zvqdotq lowering paths (PR #140922)
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Wed May 21 09:55:40 PDT 2025
================
@@ -0,0 +1,581 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,NODOT
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,NODOT
+; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvqdotq -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,DOT,DOT32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvqdotq -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,DOT,DOT64
+
+define i32 @vqdot_vv(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; NODOT-LABEL: vqdot_vv:
+; NODOT: # %bb.0: # %entry
+; NODOT-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; NODOT-NEXT: vsext.vf2 v16, v8
+; NODOT-NEXT: vsext.vf2 v20, v10
+; NODOT-NEXT: vwmul.vv v8, v16, v20
+; NODOT-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; NODOT-NEXT: vmv.s.x v16, zero
+; NODOT-NEXT: vredsum.vs v8, v8, v16
+; NODOT-NEXT: vmv.x.s a0, v8
+; NODOT-NEXT: ret
+;
+; DOT-LABEL: vqdot_vv:
+; DOT: # %bb.0: # %entry
+; DOT-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; DOT-NEXT: vmv.v.i v12, 0
+; DOT-NEXT: vqdot.vv v12, v8, v10
+; DOT-NEXT: vmv.s.x v8, zero
+; DOT-NEXT: vredsum.vs v8, v12, v8
+; DOT-NEXT: vmv.x.s a0, v8
+; DOT-NEXT: ret
+entry:
+ %a.sext = sext <vscale x 16 x i8> %a to <vscale x 16 x i32>
+ %b.sext = sext <vscale x 16 x i8> %b to <vscale x 16 x i32>
+ %mul = mul nuw nsw <vscale x 16 x i32> %a.sext, %b.sext
+ %res = tail call i32 @llvm.vector.reduce.add.v16i32(<vscale x 16 x i32> %mul)
+ ret i32 %res
+}
+
+define i32 @vqdot_vx_constant(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: vqdot_vx_constant:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: li a0, 23
+; CHECK-NEXT: vwmul.vx v8, v16, a0
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT: vmv.s.x v16, zero
+; CHECK-NEXT: vredsum.vs v8, v8, v16
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %a.sext = sext <vscale x 16 x i8> %a to <vscale x 16 x i32>
+ %mul = mul nuw nsw <vscale x 16 x i32> %a.sext, splat (i32 23)
+ %res = tail call i32 @llvm.vector.reduce.add.v16i32(<vscale x 16 x i32> %mul)
+ ret i32 %res
+}
+
+define i32 @vqdot_vx_constant_swapped(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: vqdot_vx_constant_swapped:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: li a0, 23
+; CHECK-NEXT: vwmul.vx v8, v16, a0
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT: vmv.s.x v16, zero
+; CHECK-NEXT: vredsum.vs v8, v8, v16
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %a.sext = sext <vscale x 16 x i8> %a to <vscale x 16 x i32>
+ %mul = mul nuw nsw <vscale x 16 x i32> splat (i32 23), %a.sext
+ %res = tail call i32 @llvm.vector.reduce.add.v16i32(<vscale x 16 x i32> %mul)
+ ret i32 %res
+}
+
+define i32 @vqdotu_vv(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; NODOT-LABEL: vqdotu_vv:
+; NODOT: # %bb.0: # %entry
+; NODOT-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; NODOT-NEXT: vwmulu.vv v12, v8, v10
+; NODOT-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; NODOT-NEXT: vmv.s.x v8, zero
+; NODOT-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; NODOT-NEXT: vwredsumu.vs v8, v12, v8
+; NODOT-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; NODOT-NEXT: vmv.x.s a0, v8
+; NODOT-NEXT: ret
+;
+; DOT-LABEL: vqdotu_vv:
+; DOT: # %bb.0: # %entry
+; DOT-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; DOT-NEXT: vmv.v.i v12, 0
+; DOT-NEXT: vqdotu.vv v12, v8, v10
+; DOT-NEXT: vmv.s.x v8, zero
+; DOT-NEXT: vredsum.vs v8, v12, v8
+; DOT-NEXT: vmv.x.s a0, v8
+; DOT-NEXT: ret
+entry:
+ %a.zext = zext <vscale x 16 x i8> %a to <vscale x 16 x i32>
+ %b.zext = zext <vscale x 16 x i8> %b to <vscale x 16 x i32>
+ %mul = mul nuw nsw <vscale x 16 x i32> %a.zext, %b.zext
+ %res = tail call i32 @llvm.vector.reduce.add.v16i32(<vscale x 16 x i32> %mul)
+ ret i32 %res
+}
+
+define i32 @vqdotu_vx_constant(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: vqdotu_vx_constant:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v16, v8
+; CHECK-NEXT: li a0, 123
+; CHECK-NEXT: vwmulu.vx v8, v16, a0
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT: vmv.s.x v16, zero
+; CHECK-NEXT: vredsum.vs v8, v8, v16
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %a.zext = zext <vscale x 16 x i8> %a to <vscale x 16 x i32>
+ %mul = mul nuw nsw <vscale x 16 x i32> %a.zext, splat (i32 123)
+ %res = tail call i32 @llvm.vector.reduce.add.v16i32(<vscale x 16 x i32> %mul)
+ ret i32 %res
+}
+
+define i32 @vqdotsu_vv(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; NODOT-LABEL: vqdotsu_vv:
+; NODOT: # %bb.0: # %entry
+; NODOT-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; NODOT-NEXT: vsext.vf2 v16, v8
+; NODOT-NEXT: vzext.vf2 v20, v10
+; NODOT-NEXT: vwmulsu.vv v8, v16, v20
+; NODOT-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; NODOT-NEXT: vmv.s.x v16, zero
+; NODOT-NEXT: vredsum.vs v8, v8, v16
+; NODOT-NEXT: vmv.x.s a0, v8
+; NODOT-NEXT: ret
+;
+; DOT-LABEL: vqdotsu_vv:
+; DOT: # %bb.0: # %entry
+; DOT-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; DOT-NEXT: vmv.v.i v12, 0
+; DOT-NEXT: vqdotsu.vv v12, v8, v10
+; DOT-NEXT: vmv.s.x v8, zero
+; DOT-NEXT: vredsum.vs v8, v12, v8
+; DOT-NEXT: vmv.x.s a0, v8
+; DOT-NEXT: ret
+entry:
+ %a.sext = sext <vscale x 16 x i8> %a to <vscale x 16 x i32>
+ %b.zext = zext <vscale x 16 x i8> %b to <vscale x 16 x i32>
+ %mul = mul nuw nsw <vscale x 16 x i32> %a.sext, %b.zext
+ %res = tail call i32 @llvm.vector.reduce.add.v16i32(<vscale x 16 x i32> %mul)
+ ret i32 %res
+}
+
+define i32 @vqdotsu_vv_swapped(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; NODOT-LABEL: vqdotsu_vv_swapped:
+; NODOT: # %bb.0: # %entry
+; NODOT-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; NODOT-NEXT: vsext.vf2 v16, v8
+; NODOT-NEXT: vzext.vf2 v20, v10
+; NODOT-NEXT: vwmulsu.vv v8, v16, v20
+; NODOT-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; NODOT-NEXT: vmv.s.x v16, zero
+; NODOT-NEXT: vredsum.vs v8, v8, v16
+; NODOT-NEXT: vmv.x.s a0, v8
+; NODOT-NEXT: ret
+;
+; DOT-LABEL: vqdotsu_vv_swapped:
+; DOT: # %bb.0: # %entry
+; DOT-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; DOT-NEXT: vmv.v.i v12, 0
+; DOT-NEXT: vqdotsu.vv v12, v8, v10
+; DOT-NEXT: vmv.s.x v8, zero
+; DOT-NEXT: vredsum.vs v8, v12, v8
+; DOT-NEXT: vmv.x.s a0, v8
+; DOT-NEXT: ret
+entry:
+ %a.sext = sext <vscale x 16 x i8> %a to <vscale x 16 x i32>
+ %b.zext = zext <vscale x 16 x i8> %b to <vscale x 16 x i32>
+ %mul = mul nuw nsw <vscale x 16 x i32> %b.zext, %a.sext
+ %res = tail call i32 @llvm.vector.reduce.add.v16i32(<vscale x 16 x i32> %mul)
+ ret i32 %res
+}
+
+define i32 @vdotqsu_vx_constant(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: vdotqsu_vx_constant:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: li a0, 123
+; CHECK-NEXT: vwmul.vx v8, v16, a0
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT: vmv.s.x v16, zero
+; CHECK-NEXT: vredsum.vs v8, v8, v16
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %a.sext = sext <vscale x 16 x i8> %a to <vscale x 16 x i32>
+ %mul = mul nuw nsw <vscale x 16 x i32> %a.sext, splat (i32 123)
+ %res = tail call i32 @llvm.vector.reduce.add.v16i32(<vscale x 16 x i32> %mul)
+ ret i32 %res
+}
+
+define i32 @vdotqus_vx_constant(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: vdotqus_vx_constant:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v16, v8
+; CHECK-NEXT: li a0, -23
+; CHECK-NEXT: vmv.v.x v20, a0
+; CHECK-NEXT: vwmulsu.vv v8, v20, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT: vmv.s.x v16, zero
+; CHECK-NEXT: vredsum.vs v8, v8, v16
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %a.zext = zext <vscale x 16 x i8> %a to <vscale x 16 x i32>
+ %mul = mul nuw nsw <vscale x 16 x i32> %a.zext, splat (i32 -23)
+ %res = tail call i32 @llvm.vector.reduce.add.v16i32(<vscale x 16 x i32> %mul)
+ ret i32 %res
+}
+
+define i32 @reduce_of_sext(<vscale x 16 x i8> %a) {
+; NODOT-LABEL: reduce_of_sext:
+; NODOT: # %bb.0: # %entry
+; NODOT-NEXT: vsetvli a0, zero, e32, m8, ta, ma
+; NODOT-NEXT: vsext.vf4 v16, v8
+; NODOT-NEXT: vmv.s.x v8, zero
+; NODOT-NEXT: vredsum.vs v8, v16, v8
+; NODOT-NEXT: vmv.x.s a0, v8
+; NODOT-NEXT: ret
+;
+; DOT32-LABEL: reduce_of_sext:
+; DOT32: # %bb.0: # %entry
+; DOT32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; DOT32-NEXT: vmv.v.i v10, 0
+; DOT32-NEXT: lui a0, 4112
+; DOT32-NEXT: addi a0, a0, 257
+; DOT32-NEXT: vqdot.vx v10, v8, a0
+; DOT32-NEXT: vmv.s.x v8, zero
+; DOT32-NEXT: vredsum.vs v8, v10, v8
+; DOT32-NEXT: vmv.x.s a0, v8
+; DOT32-NEXT: ret
+;
+; DOT64-LABEL: reduce_of_sext:
+; DOT64: # %bb.0: # %entry
+; DOT64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; DOT64-NEXT: vmv.v.i v10, 0
+; DOT64-NEXT: lui a0, 4112
+; DOT64-NEXT: addiw a0, a0, 257
+; DOT64-NEXT: vqdot.vx v10, v8, a0
+; DOT64-NEXT: vmv.s.x v8, zero
+; DOT64-NEXT: vredsum.vs v8, v10, v8
+; DOT64-NEXT: vmv.x.s a0, v8
+; DOT64-NEXT: ret
+entry:
+ %a.ext = sext <vscale x 16 x i8> %a to <vscale x 16 x i32>
+ %res = tail call i32 @llvm.vector.reduce.add.v16i32(<vscale x 16 x i32> %a.ext)
+ ret i32 %res
+}
+
+define i32 @reduce_of_zext(<vscale x 16 x i8> %a) {
+; NODOT-LABEL: reduce_of_zext:
+; NODOT: # %bb.0: # %entry
+; NODOT-NEXT: vsetvli a0, zero, e32, m8, ta, ma
+; NODOT-NEXT: vzext.vf4 v16, v8
+; NODOT-NEXT: vmv.s.x v8, zero
+; NODOT-NEXT: vredsum.vs v8, v16, v8
+; NODOT-NEXT: vmv.x.s a0, v8
+; NODOT-NEXT: ret
+;
+; DOT32-LABEL: reduce_of_zext:
+; DOT32: # %bb.0: # %entry
+; DOT32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; DOT32-NEXT: vmv.v.i v10, 0
+; DOT32-NEXT: lui a0, 4112
+; DOT32-NEXT: addi a0, a0, 257
+; DOT32-NEXT: vqdotu.vx v10, v8, a0
+; DOT32-NEXT: vmv.s.x v8, zero
+; DOT32-NEXT: vredsum.vs v8, v10, v8
+; DOT32-NEXT: vmv.x.s a0, v8
+; DOT32-NEXT: ret
+;
+; DOT64-LABEL: reduce_of_zext:
+; DOT64: # %bb.0: # %entry
+; DOT64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; DOT64-NEXT: vmv.v.i v10, 0
+; DOT64-NEXT: lui a0, 4112
+; DOT64-NEXT: addiw a0, a0, 257
+; DOT64-NEXT: vqdotu.vx v10, v8, a0
+; DOT64-NEXT: vmv.s.x v8, zero
+; DOT64-NEXT: vredsum.vs v8, v10, v8
+; DOT64-NEXT: vmv.x.s a0, v8
+; DOT64-NEXT: ret
+entry:
+ %a.ext = zext <vscale x 16 x i8> %a to <vscale x 16 x i32>
+ %res = tail call i32 @llvm.vector.reduce.add.v16i32(<vscale x 16 x i32> %a.ext)
+ ret i32 %res
+}
+
+define i32 @vqdot_vv_accum(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i32> %x) {
+; NODOT-LABEL: vqdot_vv_accum:
+; NODOT: # %bb.0: # %entry
+; NODOT-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; NODOT-NEXT: vsext.vf2 v12, v8
+; NODOT-NEXT: vsext.vf2 v24, v10
+; NODOT-NEXT: vwmacc.vv v16, v12, v24
+; NODOT-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; NODOT-NEXT: vmv.s.x v8, zero
+; NODOT-NEXT: vredsum.vs v8, v16, v8
+; NODOT-NEXT: vmv.x.s a0, v8
+; NODOT-NEXT: ret
+;
+; DOT-LABEL: vqdot_vv_accum:
+; DOT: # %bb.0: # %entry
+; DOT-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; DOT-NEXT: vmv.s.x v12, zero
+; DOT-NEXT: vqdot.vv v16, v8, v10
+; DOT-NEXT: vsetvli a0, zero, e32, m8, ta, ma
+; DOT-NEXT: vredsum.vs v8, v16, v12
+; DOT-NEXT: vmv.x.s a0, v8
+; DOT-NEXT: ret
+entry:
+ %a.sext = sext <vscale x 16 x i8> %a to <vscale x 16 x i32>
+ %b.sext = sext <vscale x 16 x i8> %b to <vscale x 16 x i32>
+ %mul = mul nuw nsw <vscale x 16 x i32> %a.sext, %b.sext
+ %add = add <vscale x 16 x i32> %mul, %x
+ %sum = tail call i32 @llvm.vector.reduce.add.v16i32(<vscale x 16 x i32> %add)
+ ret i32 %sum
+}
+
+define i32 @vqdotu_vv_accum(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i32> %x) {
+; NODOT-LABEL: vqdotu_vv_accum:
+; NODOT: # %bb.0: # %entry
+; NODOT-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; NODOT-NEXT: vwmulu.vv v12, v8, v10
+; NODOT-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; NODOT-NEXT: vwaddu.wv v16, v16, v12
+; NODOT-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; NODOT-NEXT: vmv.s.x v8, zero
+; NODOT-NEXT: vredsum.vs v8, v16, v8
+; NODOT-NEXT: vmv.x.s a0, v8
+; NODOT-NEXT: ret
+;
+; DOT-LABEL: vqdotu_vv_accum:
+; DOT: # %bb.0: # %entry
+; DOT-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; DOT-NEXT: vmv.s.x v12, zero
+; DOT-NEXT: vqdotu.vv v16, v8, v10
+; DOT-NEXT: vsetvli a0, zero, e32, m8, ta, ma
+; DOT-NEXT: vredsum.vs v8, v16, v12
+; DOT-NEXT: vmv.x.s a0, v8
+; DOT-NEXT: ret
+entry:
+ %a.zext = zext <vscale x 16 x i8> %a to <vscale x 16 x i32>
+ %b.zext = zext <vscale x 16 x i8> %b to <vscale x 16 x i32>
+ %mul = mul nuw nsw <vscale x 16 x i32> %a.zext, %b.zext
+ %add = add <vscale x 16 x i32> %mul, %x
+ %sum = tail call i32 @llvm.vector.reduce.add.v16i32(<vscale x 16 x i32> %add)
+ ret i32 %sum
+}
+
+define i32 @vqdotsu_vv_accum(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i32> %x) {
+; NODOT-LABEL: vqdotsu_vv_accum:
+; NODOT: # %bb.0: # %entry
+; NODOT-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; NODOT-NEXT: vsext.vf2 v12, v8
+; NODOT-NEXT: vzext.vf2 v24, v10
+; NODOT-NEXT: vwmaccsu.vv v16, v12, v24
+; NODOT-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; NODOT-NEXT: vmv.s.x v8, zero
+; NODOT-NEXT: vredsum.vs v8, v16, v8
+; NODOT-NEXT: vmv.x.s a0, v8
+; NODOT-NEXT: ret
+;
+; DOT-LABEL: vqdotsu_vv_accum:
+; DOT: # %bb.0: # %entry
+; DOT-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; DOT-NEXT: vmv.s.x v12, zero
+; DOT-NEXT: vqdotsu.vv v16, v8, v10
+; DOT-NEXT: vsetvli a0, zero, e32, m8, ta, ma
+; DOT-NEXT: vredsum.vs v8, v16, v12
+; DOT-NEXT: vmv.x.s a0, v8
+; DOT-NEXT: ret
+entry:
+ %a.sext = sext <vscale x 16 x i8> %a to <vscale x 16 x i32>
+ %b.zext = zext <vscale x 16 x i8> %b to <vscale x 16 x i32>
+ %mul = mul nuw nsw <vscale x 16 x i32> %a.sext, %b.zext
+ %add = add <vscale x 16 x i32> %mul, %x
+ %sum = tail call i32 @llvm.vector.reduce.add.v16i32(<vscale x 16 x i32> %add)
+ ret i32 %sum
+}
+
+define i32 @vqdot_vv_scalar_add(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 %x) {
+; NODOT-LABEL: vqdot_vv_scalar_add:
+; NODOT: # %bb.0: # %entry
+; NODOT-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; NODOT-NEXT: vsext.vf2 v16, v8
+; NODOT-NEXT: vsext.vf2 v20, v10
+; NODOT-NEXT: vwmul.vv v8, v16, v20
+; NODOT-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; NODOT-NEXT: vmv.s.x v16, a0
+; NODOT-NEXT: vredsum.vs v8, v8, v16
+; NODOT-NEXT: vmv.x.s a0, v8
+; NODOT-NEXT: ret
+;
+; DOT-LABEL: vqdot_vv_scalar_add:
+; DOT: # %bb.0: # %entry
+; DOT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; DOT-NEXT: vmv.v.i v12, 0
+; DOT-NEXT: vqdot.vv v12, v8, v10
+; DOT-NEXT: vmv.s.x v8, a0
+; DOT-NEXT: vredsum.vs v8, v12, v8
+; DOT-NEXT: vmv.x.s a0, v8
+; DOT-NEXT: ret
+entry:
+ %a.sext = sext <vscale x 16 x i8> %a to <vscale x 16 x i32>
+ %b.sext = sext <vscale x 16 x i8> %b to <vscale x 16 x i32>
+ %mul = mul nuw nsw <vscale x 16 x i32> %a.sext, %b.sext
+ %sum = tail call i32 @llvm.vector.reduce.add.v16i32(<vscale x 16 x i32> %mul)
+ %add = add i32 %sum, %x
+ ret i32 %add
+}
+
+define i32 @vqdotu_vv_scalar_add(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 %x) {
+; NODOT-LABEL: vqdotu_vv_scalar_add:
+; NODOT: # %bb.0: # %entry
+; NODOT-NEXT: vsetvli a1, zero, e8, m2, ta, ma
+; NODOT-NEXT: vwmulu.vv v12, v8, v10
+; NODOT-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; NODOT-NEXT: vmv.s.x v8, a0
+; NODOT-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; NODOT-NEXT: vwredsumu.vs v8, v12, v8
+; NODOT-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; NODOT-NEXT: vmv.x.s a0, v8
+; NODOT-NEXT: ret
+;
+; DOT-LABEL: vqdotu_vv_scalar_add:
+; DOT: # %bb.0: # %entry
+; DOT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; DOT-NEXT: vmv.v.i v12, 0
+; DOT-NEXT: vqdotu.vv v12, v8, v10
+; DOT-NEXT: vmv.s.x v8, a0
+; DOT-NEXT: vredsum.vs v8, v12, v8
+; DOT-NEXT: vmv.x.s a0, v8
+; DOT-NEXT: ret
+entry:
+ %a.zext = zext <vscale x 16 x i8> %a to <vscale x 16 x i32>
+ %b.zext = zext <vscale x 16 x i8> %b to <vscale x 16 x i32>
+ %mul = mul nuw nsw <vscale x 16 x i32> %a.zext, %b.zext
+ %sum = tail call i32 @llvm.vector.reduce.add.v16i32(<vscale x 16 x i32> %mul)
+ %add = add i32 %sum, %x
+ ret i32 %add
+}
+
+define i32 @vqdotsu_vv_scalar_add(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 %x) {
+; NODOT-LABEL: vqdotsu_vv_scalar_add:
+; NODOT: # %bb.0: # %entry
+; NODOT-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; NODOT-NEXT: vsext.vf2 v16, v8
+; NODOT-NEXT: vzext.vf2 v20, v10
+; NODOT-NEXT: vwmulsu.vv v8, v16, v20
+; NODOT-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; NODOT-NEXT: vmv.s.x v16, a0
+; NODOT-NEXT: vredsum.vs v8, v8, v16
+; NODOT-NEXT: vmv.x.s a0, v8
+; NODOT-NEXT: ret
+;
+; DOT-LABEL: vqdotsu_vv_scalar_add:
+; DOT: # %bb.0: # %entry
+; DOT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; DOT-NEXT: vmv.v.i v12, 0
+; DOT-NEXT: vqdotsu.vv v12, v8, v10
+; DOT-NEXT: vmv.s.x v8, a0
+; DOT-NEXT: vredsum.vs v8, v12, v8
+; DOT-NEXT: vmv.x.s a0, v8
+; DOT-NEXT: ret
+entry:
+ %a.sext = sext <vscale x 16 x i8> %a to <vscale x 16 x i32>
+ %b.zext = zext <vscale x 16 x i8> %b to <vscale x 16 x i32>
+ %mul = mul nuw nsw <vscale x 16 x i32> %a.sext, %b.zext
+ %sum = tail call i32 @llvm.vector.reduce.add.v16i32(<vscale x 16 x i32> %mul)
+ %add = add i32 %sum, %x
+ ret i32 %add
+}
+
+define i32 @vqdot_vv_split(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d) {
+; NODOT-LABEL: vqdot_vv_split:
+; NODOT: # %bb.0: # %entry
+; NODOT-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; NODOT-NEXT: vsext.vf2 v16, v8
+; NODOT-NEXT: vsext.vf2 v20, v10
+; NODOT-NEXT: vsext.vf2 v24, v12
+; NODOT-NEXT: vsext.vf2 v28, v14
+; NODOT-NEXT: vwmul.vv v8, v16, v20
+; NODOT-NEXT: vwmacc.vv v8, v24, v28
+; NODOT-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; NODOT-NEXT: vmv.s.x v16, zero
+; NODOT-NEXT: vredsum.vs v8, v8, v16
+; NODOT-NEXT: vmv.x.s a0, v8
+; NODOT-NEXT: ret
+;
+; DOT-LABEL: vqdot_vv_split:
+; DOT: # %bb.0: # %entry
+; DOT-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; DOT-NEXT: vmv.v.i v16, 0
+; DOT-NEXT: vqdot.vv v16, v8, v10
+; DOT-NEXT: vqdot.vv v16, v12, v14
+; DOT-NEXT: vmv.s.x v8, zero
+; DOT-NEXT: vredsum.vs v8, v16, v8
+; DOT-NEXT: vmv.x.s a0, v8
+; DOT-NEXT: ret
+entry:
+ %a.sext = sext <vscale x 16 x i8> %a to <vscale x 16 x i32>
+ %b.sext = sext <vscale x 16 x i8> %b to <vscale x 16 x i32>
+ %mul = mul nuw nsw <vscale x 16 x i32> %a.sext, %b.sext
+ %c.sext = sext <vscale x 16 x i8> %c to <vscale x 16 x i32>
+ %d.sext = sext <vscale x 16 x i8> %d to <vscale x 16 x i32>
+ %mul2 = mul nuw nsw <vscale x 16 x i32> %c.sext, %d.sext
+ %add = add <vscale x 16 x i32> %mul, %mul2
+ %sum = tail call i32 @llvm.vector.reduce.add.v16i32(<vscale x 16 x i32> %add)
+ ret i32 %sum
+}
+
+
+define <vscale x 4 x i32> @vqdot_vv_partial_reduce(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: vqdot_vv_partial_reduce:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vsext.vf2 v20, v10
+; CHECK-NEXT: vwmul.vv v8, v16, v20
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vadd.vv v8, v14, v8
+; CHECK-NEXT: vadd.vv v10, v10, v12
+; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: ret
+entry:
+ %a.sext = sext <vscale x 16 x i8> %a to <vscale x 16 x i32>
+ %b.sext = sext <vscale x 16 x i8> %b to <vscale x 16 x i32>
+ %mul = mul nuw nsw <vscale x 16 x i32> %a.sext, %b.sext
+ %res = call <vscale x 4 x i32> @llvm.experimental.vector.partial.reduce.add(<vscale x 4 x i32> zeroinitializer, <vscale x 16 x i32> %mul)
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @vqdot_vv_partial_reduce2(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 4 x i32> %accum) {
+; CHECK-LABEL: vqdot_vv_partial_reduce2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v24, v8
+; CHECK-NEXT: vsext.vf2 v28, v10
+; CHECK-NEXT: vwmul.vv v16, v24, v28
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vadd.vv v8, v18, v20
+; CHECK-NEXT: vadd.vv v10, v12, v16
+; CHECK-NEXT: vadd.vv v10, v22, v10
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a.sext = sext <vscale x 16 x i8> %a to <vscale x 16 x i32>
+ %b.sext = sext <vscale x 16 x i8> %b to <vscale x 16 x i32>
+ %mul = mul nuw nsw <vscale x 16 x i32> %a.sext, %b.sext
+ %res = call <vscale x 4 x i32> @llvm.experimental.vector.partial.reduce.add(<vscale x 4 x i32> %accum, <vscale x 16 x i32> %mul)
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 16 x i32> @vqdot_vv_partial_reduce3(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: vqdot_vv_partial_reduce3:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vsext.vf2 v20, v10
+; CHECK-NEXT: vwmul.vv v8, v16, v20
+; CHECK-NEXT: ret
+entry:
+ %a.sext = sext <vscale x 16 x i8> %a to <vscale x 16 x i32>
+ %b.sext = sext <vscale x 16 x i8> %b to <vscale x 16 x i32>
+ %mul = mul nuw nsw <vscale x 16 x i32> %a.sext, %b.sext
+ %res = call <vscale x 16 x i32> @llvm.experimental.vector.partial.reduce.add.nvx8i32.nvx16i32.nvx16i32(<vscale x 16 x i32> %mul, <vscale x 16 x i32> zeroinitializer)
----------------
topperc wrote:
There are too many types in this intrinsic name, and v8i32 doesn't match the result or operands.
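
For reference, a minimal sketch of how this last call might be spelled with a consistent mangling. This assumes the intrinsic is overloaded only on the accumulator/result type and the input vector type (so exactly two type suffixes), uses the usual nxv<N>i<M> spelling, and puts the accumulator first to match the earlier tests in this file; it is an illustration, not the verified signature:

  ; accumulator (same type as result) first, wide input second
  %res = call <vscale x 16 x i32> @llvm.experimental.vector.partial.reduce.add.nxv16i32.nxv16i32(<vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32> %mul)
  ret <vscale x 16 x i32> %res

Alternatively, the suffix can simply be dropped, as the other partial.reduce.add tests above do, and the remangler will fill it in.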
https://github.com/llvm/llvm-project/pull/140922