[llvm] 7690c2c - [RISCV] Add tests for fixed vector mul reduction intrinsics. NFC

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 13 10:28:04 PST 2022


Author: Craig Topper
Date: 2022-01-13T10:24:01-08:00
New Revision: 7690c2c76c91d26b1ba53d4e9b2884d814a19d7e

URL: https://github.com/llvm/llvm-project/commit/7690c2c76c91d26b1ba53d4e9b2884d814a19d7e
DIFF: https://github.com/llvm/llvm-project/commit/7690c2c76c91d26b1ba53d4e9b2884d814a19d7e.diff

LOG: [RISCV] Add tests for fixed vector mul reduction intrinsics. NFC

CodeGen for this can be improved.
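
The RVV vector extension provides reduction instructions for sum, and/or/xor,
and min/max, but none for multiply, so these intrinsics are currently lowered
to a log2(VL)-step shuffle-and-multiply sequence (the repeated vrgather/vmul
pairs in the CHECK lines below), with the shuffle indices for the wider types
loaded from the constant pool. As a rough sketch of the kind of IR these
tests exercise (the function name here is illustrative only; the committed
tests load the vector operand from memory):

  declare i32 @llvm.vector.reduce.mul.v4i32(<4 x i32>)

  define i32 @mul_reduce_sketch(<4 x i32> %v) {
    %r = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> %v)
    ret i32 %r
  }

Each step gathers the upper half of the remaining elements down alongside the
lower half and multiplies them elementwise, halving the problem size until the
result sits in element 0, which is then read out with vmv.x.s.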

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
index 9c53c8dcbd6b9..7d3698cbb0627 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
@@ -5546,3 +5546,961 @@ define i64 @vreduce_umax_v64i64(<64 x i64>* %x) nounwind {
   %red = call i64 @llvm.vector.reduce.umax.v64i64(<64 x i64> %v)
   ret i64 %red
 }
+
+declare i8 @llvm.vector.reduce.mul.v1i8(<1 x i8>)
+
+define i8 @vreduce_mul_v1i8(<1 x i8>* %x) {
+; CHECK-LABEL: vreduce_mul_v1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %v = load <1 x i8>, <1 x i8>* %x
+  %red = call i8 @llvm.vector.reduce.mul.v1i8(<1 x i8> %v)
+  ret i8 %red
+}
+
+declare i8 @llvm.vector.reduce.mul.v2i8(<2 x i8>)
+
+define i8 @vreduce_mul_v2i8(<2 x i8>* %x) {
+; CHECK-LABEL: vreduce_mul_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    lb a0, 1(a0)
+; CHECK-NEXT:    vmul.vx v8, v8, a0
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %v = load <2 x i8>, <2 x i8>* %x
+  %red = call i8 @llvm.vector.reduce.mul.v2i8(<2 x i8> %v)
+  ret i8 %red
+}
+
+declare i8 @llvm.vector.reduce.mul.v4i8(<4 x i8>)
+
+define i8 @vreduce_mul_v4i8(<4 x i8>* %x) {
+; CHECK-LABEL: vreduce_mul_v4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vid.v v9
+; CHECK-NEXT:    vadd.vi v9, v9, 2
+; CHECK-NEXT:    vrgather.vv v10, v8, v9
+; CHECK-NEXT:    vmul.vv v8, v8, v10
+; CHECK-NEXT:    vrgather.vi v9, v8, 1
+; CHECK-NEXT:    vmul.vv v8, v8, v9
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %v = load <4 x i8>, <4 x i8>* %x
+  %red = call i8 @llvm.vector.reduce.mul.v4i8(<4 x i8> %v)
+  ret i8 %red
+}
+
+declare i8 @llvm.vector.reduce.mul.v8i8(<8 x i8>)
+
+define i8 @vreduce_mul_v8i8(<8 x i8>* %x) {
+; CHECK-LABEL: vreduce_mul_v8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vid.v v9
+; CHECK-NEXT:    vadd.vi v10, v9, 4
+; CHECK-NEXT:    vrgather.vv v11, v8, v10
+; CHECK-NEXT:    vmul.vv v8, v8, v11
+; CHECK-NEXT:    vadd.vi v9, v9, 2
+; CHECK-NEXT:    vrgather.vv v10, v8, v9
+; CHECK-NEXT:    vmul.vv v8, v8, v10
+; CHECK-NEXT:    vrgather.vi v9, v8, 1
+; CHECK-NEXT:    vmul.vv v8, v8, v9
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %v = load <8 x i8>, <8 x i8>* %x
+  %red = call i8 @llvm.vector.reduce.mul.v8i8(<8 x i8> %v)
+  ret i8 %red
+}
+
+declare i8 @llvm.vector.reduce.mul.v16i8(<16 x i8>)
+
+define i8 @vreduce_mul_v16i8(<16 x i8>* %x) {
+; CHECK-LABEL: vreduce_mul_v16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vid.v v9
+; CHECK-NEXT:    vadd.vi v10, v9, 8
+; CHECK-NEXT:    vrgather.vv v11, v8, v10
+; CHECK-NEXT:    vmul.vv v8, v8, v11
+; CHECK-NEXT:    vadd.vi v10, v9, 4
+; CHECK-NEXT:    vrgather.vv v11, v8, v10
+; CHECK-NEXT:    vmul.vv v8, v8, v11
+; CHECK-NEXT:    vadd.vi v9, v9, 2
+; CHECK-NEXT:    vrgather.vv v10, v8, v9
+; CHECK-NEXT:    vmul.vv v8, v8, v10
+; CHECK-NEXT:    vrgather.vi v9, v8, 1
+; CHECK-NEXT:    vmul.vv v8, v8, v9
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %v = load <16 x i8>, <16 x i8>* %x
+  %red = call i8 @llvm.vector.reduce.mul.v16i8(<16 x i8> %v)
+  ret i8 %red
+}
+
+declare i8 @llvm.vector.reduce.mul.v32i8(<32 x i8>)
+
+define i8 @vreduce_mul_v32i8(<32 x i8>* %x) {
+; CHECK-LABEL: vreduce_mul_v32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI253_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI253_0)
+; CHECK-NEXT:    vle8.v v10, (a0)
+; CHECK-NEXT:    vrgather.vv v12, v8, v10
+; CHECK-NEXT:    vmul.vv v8, v8, v12
+; CHECK-NEXT:    vid.v v10
+; CHECK-NEXT:    vadd.vi v12, v10, 8
+; CHECK-NEXT:    vrgather.vv v14, v8, v12
+; CHECK-NEXT:    vmul.vv v8, v8, v14
+; CHECK-NEXT:    vadd.vi v12, v10, 4
+; CHECK-NEXT:    vrgather.vv v14, v8, v12
+; CHECK-NEXT:    vmul.vv v8, v8, v14
+; CHECK-NEXT:    vadd.vi v10, v10, 2
+; CHECK-NEXT:    vrgather.vv v12, v8, v10
+; CHECK-NEXT:    vmul.vv v8, v8, v12
+; CHECK-NEXT:    vrgather.vi v10, v8, 1
+; CHECK-NEXT:    vmul.vv v8, v8, v10
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %v = load <32 x i8>, <32 x i8>* %x
+  %red = call i8 @llvm.vector.reduce.mul.v32i8(<32 x i8> %v)
+  ret i8 %red
+}
+
+declare i8 @llvm.vector.reduce.mul.v64i8(<64 x i8>)
+
+define i8 @vreduce_mul_v64i8(<64 x i8>* %x) {
+; CHECK-LABEL: vreduce_mul_v64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 64
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI254_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI254_0)
+; CHECK-NEXT:    vle8.v v12, (a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI254_1)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI254_1)
+; CHECK-NEXT:    vle8.v v16, (a0)
+; CHECK-NEXT:    vrgather.vv v20, v8, v12
+; CHECK-NEXT:    vmul.vv v8, v8, v20
+; CHECK-NEXT:    vrgather.vv v12, v8, v16
+; CHECK-NEXT:    vmul.vv v8, v8, v12
+; CHECK-NEXT:    vid.v v12
+; CHECK-NEXT:    vadd.vi v16, v12, 8
+; CHECK-NEXT:    vrgather.vv v20, v8, v16
+; CHECK-NEXT:    vmul.vv v8, v8, v20
+; CHECK-NEXT:    vadd.vi v16, v12, 4
+; CHECK-NEXT:    vrgather.vv v20, v8, v16
+; CHECK-NEXT:    vmul.vv v8, v8, v20
+; CHECK-NEXT:    vadd.vi v12, v12, 2
+; CHECK-NEXT:    vrgather.vv v16, v8, v12
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vrgather.vi v12, v8, 1
+; CHECK-NEXT:    vmul.vv v8, v8, v12
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %v = load <64 x i8>, <64 x i8>* %x
+  %red = call i8 @llvm.vector.reduce.mul.v64i8(<64 x i8> %v)
+  ret i8 %red
+}
+
+declare i8 @llvm.vector.reduce.mul.v128i8(<128 x i8>)
+
+define i8 @vreduce_mul_v128i8(<128 x i8>* %x) {
+; CHECK-LABEL: vreduce_mul_v128i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 128
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI255_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI255_0)
+; CHECK-NEXT:    vle8.v v24, (a0)
+; CHECK-NEXT:    vrgather.vv v16, v8, v24
+; CHECK-NEXT:    lui a0, %hi(.LCPI255_1)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI255_1)
+; CHECK-NEXT:    vle8.v v24, (a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI255_2)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI255_2)
+; CHECK-NEXT:    vle8.v v0, (a0)
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vrgather.vv v16, v8, v24
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vrgather.vv v16, v8, v0
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vid.v v16
+; CHECK-NEXT:    vadd.vi v24, v16, 8
+; CHECK-NEXT:    vrgather.vv v0, v8, v24
+; CHECK-NEXT:    vmul.vv v8, v8, v0
+; CHECK-NEXT:    vadd.vi v24, v16, 4
+; CHECK-NEXT:    vrgather.vv v0, v8, v24
+; CHECK-NEXT:    vmul.vv v8, v8, v0
+; CHECK-NEXT:    vadd.vi v16, v16, 2
+; CHECK-NEXT:    vrgather.vv v24, v8, v16
+; CHECK-NEXT:    vmul.vv v8, v8, v24
+; CHECK-NEXT:    vrgather.vi v16, v8, 1
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %v = load <128 x i8>, <128 x i8>* %x
+  %red = call i8 @llvm.vector.reduce.mul.v128i8(<128 x i8> %v)
+  ret i8 %red
+}
+
+declare i8 @llvm.vector.reduce.mul.v256i8(<256 x i8>)
+
+define i8 @vreduce_mul_v256i8(<256 x i8>* %x) {
+; CHECK-LABEL: vreduce_mul_v256i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 128
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    addi a0, a0, 128
+; CHECK-NEXT:    vle8.v v16, (a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI256_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI256_0)
+; CHECK-NEXT:    vle8.v v24, (a0)
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vrgather.vv v16, v8, v24
+; CHECK-NEXT:    lui a0, %hi(.LCPI256_1)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI256_1)
+; CHECK-NEXT:    vle8.v v24, (a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI256_2)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI256_2)
+; CHECK-NEXT:    vle8.v v0, (a0)
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vrgather.vv v16, v8, v24
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vrgather.vv v16, v8, v0
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vid.v v16
+; CHECK-NEXT:    vadd.vi v24, v16, 8
+; CHECK-NEXT:    vrgather.vv v0, v8, v24
+; CHECK-NEXT:    vmul.vv v8, v8, v0
+; CHECK-NEXT:    vadd.vi v24, v16, 4
+; CHECK-NEXT:    vrgather.vv v0, v8, v24
+; CHECK-NEXT:    vmul.vv v8, v8, v0
+; CHECK-NEXT:    vadd.vi v16, v16, 2
+; CHECK-NEXT:    vrgather.vv v24, v8, v16
+; CHECK-NEXT:    vmul.vv v8, v8, v24
+; CHECK-NEXT:    vrgather.vi v16, v8, 1
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %v = load <256 x i8>, <256 x i8>* %x
+  %red = call i8 @llvm.vector.reduce.mul.v256i8(<256 x i8> %v)
+  ret i8 %red
+}
+
+declare i16 @llvm.vector.reduce.mul.v1i16(<1 x i16>)
+
+define i16 @vreduce_mul_v1i16(<1 x i16>* %x) {
+; CHECK-LABEL: vreduce_mul_v1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %v = load <1 x i16>, <1 x i16>* %x
+  %red = call i16 @llvm.vector.reduce.mul.v1i16(<1 x i16> %v)
+  ret i16 %red
+}
+
+declare i16 @llvm.vector.reduce.mul.v2i16(<2 x i16>)
+
+define i16 @vreduce_mul_v2i16(<2 x i16>* %x) {
+; CHECK-LABEL: vreduce_mul_v2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    lh a0, 2(a0)
+; CHECK-NEXT:    vmul.vx v8, v8, a0
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %v = load <2 x i16>, <2 x i16>* %x
+  %red = call i16 @llvm.vector.reduce.mul.v2i16(<2 x i16> %v)
+  ret i16 %red
+}
+
+declare i16 @llvm.vector.reduce.mul.v4i16(<4 x i16>)
+
+define i16 @vreduce_mul_v4i16(<4 x i16>* %x) {
+; CHECK-LABEL: vreduce_mul_v4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vid.v v9
+; CHECK-NEXT:    vadd.vi v9, v9, 2
+; CHECK-NEXT:    vrgather.vv v10, v8, v9
+; CHECK-NEXT:    vmul.vv v8, v8, v10
+; CHECK-NEXT:    vrgather.vi v9, v8, 1
+; CHECK-NEXT:    vmul.vv v8, v8, v9
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %v = load <4 x i16>, <4 x i16>* %x
+  %red = call i16 @llvm.vector.reduce.mul.v4i16(<4 x i16> %v)
+  ret i16 %red
+}
+
+declare i16 @llvm.vector.reduce.mul.v8i16(<8 x i16>)
+
+define i16 @vreduce_mul_v8i16(<8 x i16>* %x) {
+; CHECK-LABEL: vreduce_mul_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vid.v v9
+; CHECK-NEXT:    vadd.vi v10, v9, 4
+; CHECK-NEXT:    vrgather.vv v11, v8, v10
+; CHECK-NEXT:    vmul.vv v8, v8, v11
+; CHECK-NEXT:    vadd.vi v9, v9, 2
+; CHECK-NEXT:    vrgather.vv v10, v8, v9
+; CHECK-NEXT:    vmul.vv v8, v8, v10
+; CHECK-NEXT:    vrgather.vi v9, v8, 1
+; CHECK-NEXT:    vmul.vv v8, v8, v9
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %v = load <8 x i16>, <8 x i16>* %x
+  %red = call i16 @llvm.vector.reduce.mul.v8i16(<8 x i16> %v)
+  ret i16 %red
+}
+
+declare i16 @llvm.vector.reduce.mul.v16i16(<16 x i16>)
+
+define i16 @vreduce_mul_v16i16(<16 x i16>* %x) {
+; CHECK-LABEL: vreduce_mul_v16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vid.v v10
+; CHECK-NEXT:    vadd.vi v12, v10, 8
+; CHECK-NEXT:    vrgather.vv v14, v8, v12
+; CHECK-NEXT:    vmul.vv v8, v8, v14
+; CHECK-NEXT:    vadd.vi v12, v10, 4
+; CHECK-NEXT:    vrgather.vv v14, v8, v12
+; CHECK-NEXT:    vmul.vv v8, v8, v14
+; CHECK-NEXT:    vadd.vi v10, v10, 2
+; CHECK-NEXT:    vrgather.vv v12, v8, v10
+; CHECK-NEXT:    vmul.vv v8, v8, v12
+; CHECK-NEXT:    vrgather.vi v10, v8, 1
+; CHECK-NEXT:    vmul.vv v8, v8, v10
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %v = load <16 x i16>, <16 x i16>* %x
+  %red = call i16 @llvm.vector.reduce.mul.v16i16(<16 x i16> %v)
+  ret i16 %red
+}
+
+declare i16 @llvm.vector.reduce.mul.v32i16(<32 x i16>)
+
+define i16 @vreduce_mul_v32i16(<32 x i16>* %x) {
+; CHECK-LABEL: vreduce_mul_v32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI262_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI262_0)
+; CHECK-NEXT:    vle16.v v12, (a0)
+; CHECK-NEXT:    vrgather.vv v16, v8, v12
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vid.v v12
+; CHECK-NEXT:    vadd.vi v16, v12, 8
+; CHECK-NEXT:    vrgather.vv v20, v8, v16
+; CHECK-NEXT:    vmul.vv v8, v8, v20
+; CHECK-NEXT:    vadd.vi v16, v12, 4
+; CHECK-NEXT:    vrgather.vv v20, v8, v16
+; CHECK-NEXT:    vmul.vv v8, v8, v20
+; CHECK-NEXT:    vadd.vi v12, v12, 2
+; CHECK-NEXT:    vrgather.vv v16, v8, v12
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vrgather.vi v12, v8, 1
+; CHECK-NEXT:    vmul.vv v8, v8, v12
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %v = load <32 x i16>, <32 x i16>* %x
+  %red = call i16 @llvm.vector.reduce.mul.v32i16(<32 x i16> %v)
+  ret i16 %red
+}
+
+declare i16 @llvm.vector.reduce.mul.v64i16(<64 x i16>)
+
+define i16 @vreduce_mul_v64i16(<64 x i16>* %x) {
+; CHECK-LABEL: vreduce_mul_v64i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 64
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI263_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI263_0)
+; CHECK-NEXT:    vle16.v v16, (a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI263_1)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI263_1)
+; CHECK-NEXT:    vle16.v v24, (a0)
+; CHECK-NEXT:    vrgather.vv v0, v8, v16
+; CHECK-NEXT:    vmul.vv v8, v8, v0
+; CHECK-NEXT:    vrgather.vv v16, v8, v24
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vid.v v16
+; CHECK-NEXT:    vadd.vi v24, v16, 8
+; CHECK-NEXT:    vrgather.vv v0, v8, v24
+; CHECK-NEXT:    vmul.vv v8, v8, v0
+; CHECK-NEXT:    vadd.vi v24, v16, 4
+; CHECK-NEXT:    vrgather.vv v0, v8, v24
+; CHECK-NEXT:    vmul.vv v8, v8, v0
+; CHECK-NEXT:    vadd.vi v16, v16, 2
+; CHECK-NEXT:    vrgather.vv v24, v8, v16
+; CHECK-NEXT:    vmul.vv v8, v8, v24
+; CHECK-NEXT:    vrgather.vi v16, v8, 1
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %v = load <64 x i16>, <64 x i16>* %x
+  %red = call i16 @llvm.vector.reduce.mul.v64i16(<64 x i16> %v)
+  ret i16 %red
+}
+
+declare i16 @llvm.vector.reduce.mul.v128i16(<128 x i16>)
+
+define i16 @vreduce_mul_v128i16(<128 x i16>* %x) {
+; CHECK-LABEL: vreduce_mul_v128i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 64
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    addi a0, a0, 128
+; CHECK-NEXT:    vle16.v v16, (a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI264_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI264_0)
+; CHECK-NEXT:    vle16.v v24, (a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI264_1)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI264_1)
+; CHECK-NEXT:    vle16.v v0, (a0)
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vrgather.vv v16, v8, v24
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vrgather.vv v16, v8, v0
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vid.v v16
+; CHECK-NEXT:    vadd.vi v24, v16, 8
+; CHECK-NEXT:    vrgather.vv v0, v8, v24
+; CHECK-NEXT:    vmul.vv v8, v8, v0
+; CHECK-NEXT:    vadd.vi v24, v16, 4
+; CHECK-NEXT:    vrgather.vv v0, v8, v24
+; CHECK-NEXT:    vmul.vv v8, v8, v0
+; CHECK-NEXT:    vadd.vi v16, v16, 2
+; CHECK-NEXT:    vrgather.vv v24, v8, v16
+; CHECK-NEXT:    vmul.vv v8, v8, v24
+; CHECK-NEXT:    vrgather.vi v16, v8, 1
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %v = load <128 x i16>, <128 x i16>* %x
+  %red = call i16 @llvm.vector.reduce.mul.v128i16(<128 x i16> %v)
+  ret i16 %red
+}
+
+declare i32 @llvm.vector.reduce.mul.v1i32(<1 x i32>)
+
+define i32 @vreduce_mul_v1i32(<1 x i32>* %x) {
+; CHECK-LABEL: vreduce_mul_v1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %v = load <1 x i32>, <1 x i32>* %x
+  %red = call i32 @llvm.vector.reduce.mul.v1i32(<1 x i32> %v)
+  ret i32 %red
+}
+
+declare i32 @llvm.vector.reduce.mul.v2i32(<2 x i32>)
+
+define i32 @vreduce_mul_v2i32(<2 x i32>* %x) {
+; CHECK-LABEL: vreduce_mul_v2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    lw a0, 4(a0)
+; CHECK-NEXT:    vmul.vx v8, v8, a0
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %v = load <2 x i32>, <2 x i32>* %x
+  %red = call i32 @llvm.vector.reduce.mul.v2i32(<2 x i32> %v)
+  ret i32 %red
+}
+
+declare i32 @llvm.vector.reduce.mul.v4i32(<4 x i32>)
+
+define i32 @vreduce_mul_v4i32(<4 x i32>* %x) {
+; CHECK-LABEL: vreduce_mul_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vid.v v9
+; CHECK-NEXT:    vadd.vi v9, v9, 2
+; CHECK-NEXT:    vrgather.vv v10, v8, v9
+; CHECK-NEXT:    vmul.vv v8, v8, v10
+; CHECK-NEXT:    vrgather.vi v9, v8, 1
+; CHECK-NEXT:    vmul.vv v8, v8, v9
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %v = load <4 x i32>, <4 x i32>* %x
+  %red = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> %v)
+  ret i32 %red
+}
+
+declare i32 @llvm.vector.reduce.mul.v8i32(<8 x i32>)
+
+define i32 @vreduce_mul_v8i32(<8 x i32>* %x) {
+; CHECK-LABEL: vreduce_mul_v8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vid.v v10
+; CHECK-NEXT:    vadd.vi v12, v10, 4
+; CHECK-NEXT:    vrgather.vv v14, v8, v12
+; CHECK-NEXT:    vmul.vv v8, v8, v14
+; CHECK-NEXT:    vadd.vi v10, v10, 2
+; CHECK-NEXT:    vrgather.vv v12, v8, v10
+; CHECK-NEXT:    vmul.vv v8, v8, v12
+; CHECK-NEXT:    vrgather.vi v10, v8, 1
+; CHECK-NEXT:    vmul.vv v8, v8, v10
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %v = load <8 x i32>, <8 x i32>* %x
+  %red = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> %v)
+  ret i32 %red
+}
+
+declare i32 @llvm.vector.reduce.mul.v16i32(<16 x i32>)
+
+define i32 @vreduce_mul_v16i32(<16 x i32>* %x) {
+; CHECK-LABEL: vreduce_mul_v16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vid.v v12
+; CHECK-NEXT:    vadd.vi v16, v12, 8
+; CHECK-NEXT:    vrgather.vv v20, v8, v16
+; CHECK-NEXT:    vmul.vv v8, v8, v20
+; CHECK-NEXT:    vadd.vi v16, v12, 4
+; CHECK-NEXT:    vrgather.vv v20, v8, v16
+; CHECK-NEXT:    vmul.vv v8, v8, v20
+; CHECK-NEXT:    vadd.vi v12, v12, 2
+; CHECK-NEXT:    vrgather.vv v16, v8, v12
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vrgather.vi v12, v8, 1
+; CHECK-NEXT:    vmul.vv v8, v8, v12
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %v = load <16 x i32>, <16 x i32>* %x
+  %red = call i32 @llvm.vector.reduce.mul.v16i32(<16 x i32> %v)
+  ret i32 %red
+}
+
+declare i32 @llvm.vector.reduce.mul.v32i32(<32 x i32>)
+
+define i32 @vreduce_mul_v32i32(<32 x i32>* %x) {
+; CHECK-LABEL: vreduce_mul_v32i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI270_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI270_0)
+; CHECK-NEXT:    vle32.v v16, (a0)
+; CHECK-NEXT:    vrgather.vv v24, v8, v16
+; CHECK-NEXT:    vmul.vv v8, v8, v24
+; CHECK-NEXT:    vid.v v16
+; CHECK-NEXT:    vadd.vi v24, v16, 8
+; CHECK-NEXT:    vrgather.vv v0, v8, v24
+; CHECK-NEXT:    vmul.vv v8, v8, v0
+; CHECK-NEXT:    vadd.vi v24, v16, 4
+; CHECK-NEXT:    vrgather.vv v0, v8, v24
+; CHECK-NEXT:    vmul.vv v8, v8, v0
+; CHECK-NEXT:    vadd.vi v16, v16, 2
+; CHECK-NEXT:    vrgather.vv v24, v8, v16
+; CHECK-NEXT:    vmul.vv v8, v8, v24
+; CHECK-NEXT:    vrgather.vi v16, v8, 1
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %v = load <32 x i32>, <32 x i32>* %x
+  %red = call i32 @llvm.vector.reduce.mul.v32i32(<32 x i32> %v)
+  ret i32 %red
+}
+
+declare i32 @llvm.vector.reduce.mul.v64i32(<64 x i32>)
+
+define i32 @vreduce_mul_v64i32(<64 x i32>* %x) {
+; CHECK-LABEL: vreduce_mul_v64i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    addi a0, a0, 128
+; CHECK-NEXT:    vle32.v v16, (a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI271_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI271_0)
+; CHECK-NEXT:    vle32.v v24, (a0)
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vrgather.vv v16, v8, v24
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vid.v v16
+; CHECK-NEXT:    vadd.vi v24, v16, 8
+; CHECK-NEXT:    vrgather.vv v0, v8, v24
+; CHECK-NEXT:    vmul.vv v8, v8, v0
+; CHECK-NEXT:    vadd.vi v24, v16, 4
+; CHECK-NEXT:    vrgather.vv v0, v8, v24
+; CHECK-NEXT:    vmul.vv v8, v8, v0
+; CHECK-NEXT:    vadd.vi v16, v16, 2
+; CHECK-NEXT:    vrgather.vv v24, v8, v16
+; CHECK-NEXT:    vmul.vv v8, v8, v24
+; CHECK-NEXT:    vrgather.vi v16, v8, 1
+; CHECK-NEXT:    vmul.vv v8, v8, v16
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %v = load <64 x i32>, <64 x i32>* %x
+  %red = call i32 @llvm.vector.reduce.mul.v64i32(<64 x i32> %v)
+  ret i32 %red
+}
+
+declare i64 @llvm.vector.reduce.mul.v1i64(<1 x i64>)
+
+define i64 @vreduce_mul_v1i64(<1 x i64>* %x) {
+; RV32-LABEL: vreduce_mul_v1i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vle64.v v8, (a0)
+; RV32-NEXT:    li a0, 32
+; RV32-NEXT:    vsrl.vx v9, v8, a0
+; RV32-NEXT:    vmv.x.s a1, v9
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_mul_v1i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
+  %v = load <1 x i64>, <1 x i64>* %x
+  %red = call i64 @llvm.vector.reduce.mul.v1i64(<1 x i64> %v)
+  ret i64 %red
+}
+
+declare i64 @llvm.vector.reduce.mul.v2i64(<2 x i64>)
+
+define i64 @vreduce_mul_v2i64(<2 x i64>* %x) {
+; RV32-LABEL: vreduce_mul_v2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT:    vle64.v v8, (a0)
+; RV32-NEXT:    addi a0, a0, 8
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vmul.vv v8, v8, v9
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_mul_v2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    ld a0, 8(a0)
+; RV64-NEXT:    vmul.vx v8, v8, a0
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
+  %v = load <2 x i64>, <2 x i64>* %x
+  %red = call i64 @llvm.vector.reduce.mul.v2i64(<2 x i64> %v)
+  ret i64 %red
+}
+
+declare i64 @llvm.vector.reduce.mul.v4i64(<4 x i64>)
+
+define i64 @vreduce_mul_v4i64(<4 x i64>* %x) {
+; RV32-LABEL: vreduce_mul_v4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; RV32-NEXT:    vle64.v v8, (a0)
+; RV32-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; RV32-NEXT:    vid.v v10
+; RV32-NEXT:    vadd.vi v10, v10, 2
+; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
+; RV32-NEXT:    vrgatherei16.vv v12, v8, v10
+; RV32-NEXT:    vmul.vv v8, v8, v12
+; RV32-NEXT:    vrgather.vi v10, v8, 1
+; RV32-NEXT:    vmul.vv v8, v8, v10
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_mul_v4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    vid.v v10
+; RV64-NEXT:    vadd.vi v10, v10, 2
+; RV64-NEXT:    vrgather.vv v12, v8, v10
+; RV64-NEXT:    vmul.vv v8, v8, v12
+; RV64-NEXT:    vrgather.vi v10, v8, 1
+; RV64-NEXT:    vmul.vv v8, v8, v10
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
+  %v = load <4 x i64>, <4 x i64>* %x
+  %red = call i64 @llvm.vector.reduce.mul.v4i64(<4 x i64> %v)
+  ret i64 %red
+}
+
+declare i64 @llvm.vector.reduce.mul.v8i64(<8 x i64>)
+
+define i64 @vreduce_mul_v8i64(<8 x i64>* %x) {
+; RV32-LABEL: vreduce_mul_v8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV32-NEXT:    vle64.v v8, (a0)
+; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; RV32-NEXT:    vid.v v12
+; RV32-NEXT:    vadd.vi v13, v12, 4
+; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT:    vrgatherei16.vv v16, v8, v13
+; RV32-NEXT:    vmul.vv v8, v8, v16
+; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; RV32-NEXT:    vadd.vi v12, v12, 2
+; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT:    vrgatherei16.vv v16, v8, v12
+; RV32-NEXT:    vmul.vv v8, v8, v16
+; RV32-NEXT:    vrgather.vi v12, v8, 1
+; RV32-NEXT:    vmul.vv v8, v8, v12
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m4, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_mul_v8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    vid.v v12
+; RV64-NEXT:    vadd.vi v16, v12, 4
+; RV64-NEXT:    vrgather.vv v20, v8, v16
+; RV64-NEXT:    vmul.vv v8, v8, v20
+; RV64-NEXT:    vadd.vi v12, v12, 2
+; RV64-NEXT:    vrgather.vv v16, v8, v12
+; RV64-NEXT:    vmul.vv v8, v8, v16
+; RV64-NEXT:    vrgather.vi v12, v8, 1
+; RV64-NEXT:    vmul.vv v8, v8, v12
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
+  %v = load <8 x i64>, <8 x i64>* %x
+  %red = call i64 @llvm.vector.reduce.mul.v8i64(<8 x i64> %v)
+  ret i64 %red
+}
+
+declare i64 @llvm.vector.reduce.mul.v16i64(<16 x i64>)
+
+define i64 @vreduce_mul_v16i64(<16 x i64>* %x) {
+; RV32-LABEL: vreduce_mul_v16i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; RV32-NEXT:    vle64.v v8, (a0)
+; RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; RV32-NEXT:    vid.v v16
+; RV32-NEXT:    vadd.vi v18, v16, 8
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT:    vrgatherei16.vv v24, v8, v18
+; RV32-NEXT:    vmul.vv v8, v8, v24
+; RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; RV32-NEXT:    vadd.vi v18, v16, 4
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT:    vrgatherei16.vv v24, v8, v18
+; RV32-NEXT:    vmul.vv v8, v8, v24
+; RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; RV32-NEXT:    vadd.vi v16, v16, 2
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT:    vrgatherei16.vv v24, v8, v16
+; RV32-NEXT:    vmul.vv v8, v8, v24
+; RV32-NEXT:    vrgather.vi v16, v8, 1
+; RV32-NEXT:    vmul.vv v8, v8, v16
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m8, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_mul_v16i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    vid.v v16
+; RV64-NEXT:    vadd.vi v24, v16, 8
+; RV64-NEXT:    vrgather.vv v0, v8, v24
+; RV64-NEXT:    vmul.vv v8, v8, v0
+; RV64-NEXT:    vadd.vi v24, v16, 4
+; RV64-NEXT:    vrgather.vv v0, v8, v24
+; RV64-NEXT:    vmul.vv v8, v8, v0
+; RV64-NEXT:    vadd.vi v16, v16, 2
+; RV64-NEXT:    vrgather.vv v24, v8, v16
+; RV64-NEXT:    vmul.vv v8, v8, v24
+; RV64-NEXT:    vrgather.vi v16, v8, 1
+; RV64-NEXT:    vmul.vv v8, v8, v16
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
+  %v = load <16 x i64>, <16 x i64>* %x
+  %red = call i64 @llvm.vector.reduce.mul.v16i64(<16 x i64> %v)
+  ret i64 %red
+}
+
+declare i64 @llvm.vector.reduce.mul.v32i64(<32 x i64>)
+
+define i64 @vreduce_mul_v32i64(<32 x i64>* %x) {
+; RV32-LABEL: vreduce_mul_v32i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; RV32-NEXT:    vle64.v v8, (a0)
+; RV32-NEXT:    addi a0, a0, 128
+; RV32-NEXT:    vle64.v v16, (a0)
+; RV32-NEXT:    vmul.vv v8, v8, v16
+; RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; RV32-NEXT:    vid.v v16
+; RV32-NEXT:    vadd.vi v18, v16, 8
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT:    vrgatherei16.vv v24, v8, v18
+; RV32-NEXT:    vmul.vv v8, v8, v24
+; RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; RV32-NEXT:    vadd.vi v18, v16, 4
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT:    vrgatherei16.vv v24, v8, v18
+; RV32-NEXT:    vmul.vv v8, v8, v24
+; RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; RV32-NEXT:    vadd.vi v16, v16, 2
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT:    vrgatherei16.vv v24, v8, v16
+; RV32-NEXT:    vmul.vv v8, v8, v24
+; RV32-NEXT:    vrgather.vi v16, v8, 1
+; RV32-NEXT:    vmul.vv v8, v8, v16
+; RV32-NEXT:    vsetivli zero, 0, e32, m8, ta, mu
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    vsetivli zero, 1, e32, m8, ta, mu
+; RV32-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_mul_v32i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    addi a0, a0, 128
+; RV64-NEXT:    vle64.v v16, (a0)
+; RV64-NEXT:    vmul.vv v8, v8, v16
+; RV64-NEXT:    vid.v v16
+; RV64-NEXT:    vadd.vi v24, v16, 8
+; RV64-NEXT:    vrgather.vv v0, v8, v24
+; RV64-NEXT:    vmul.vv v8, v8, v0
+; RV64-NEXT:    vadd.vi v24, v16, 4
+; RV64-NEXT:    vrgather.vv v0, v8, v24
+; RV64-NEXT:    vmul.vv v8, v8, v0
+; RV64-NEXT:    vadd.vi v16, v16, 2
+; RV64-NEXT:    vrgather.vv v24, v8, v16
+; RV64-NEXT:    vmul.vv v8, v8, v24
+; RV64-NEXT:    vrgather.vi v16, v8, 1
+; RV64-NEXT:    vmul.vv v8, v8, v16
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
+  %v = load <32 x i64>, <32 x i64>* %x
+  %red = call i64 @llvm.vector.reduce.mul.v32i64(<32 x i64> %v)
+  ret i64 %red
+}
+
+declare i64 @llvm.vector.reduce.mul.v64i64(<64 x i64>)
+
+define i64 @vreduce_mul_v64i64(<64 x i64>* %x) nounwind {
+; RV32-LABEL: vreduce_mul_v64i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; RV32-NEXT:    vle64.v v8, (a0)
+; RV32-NEXT:    addi a1, a0, 384
+; RV32-NEXT:    vle64.v v16, (a1)
+; RV32-NEXT:    addi a1, a0, 256
+; RV32-NEXT:    addi a0, a0, 128
+; RV32-NEXT:    vle64.v v24, (a0)
+; RV32-NEXT:    vle64.v v0, (a1)
+; RV32-NEXT:    vmul.vv v16, v24, v16
+; RV32-NEXT:    vmul.vv v8, v8, v0
+; RV32-NEXT:    vmul.vv v8, v8, v16
+; RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; RV32-NEXT:    vid.v v16
+; RV32-NEXT:    vadd.vi v18, v16, 8
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT:    vrgatherei16.vv v24, v8, v18
+; RV32-NEXT:    vmul.vv v8, v8, v24
+; RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; RV32-NEXT:    vadd.vi v18, v16, 4
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT:    vrgatherei16.vv v24, v8, v18
+; RV32-NEXT:    vmul.vv v8, v8, v24
+; RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; RV32-NEXT:    vadd.vi v16, v16, 2
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT:    vrgatherei16.vv v24, v8, v16
+; RV32-NEXT:    vmul.vv v8, v8, v24
+; RV32-NEXT:    vrgather.vi v16, v8, 1
+; RV32-NEXT:    vmul.vv v8, v8, v16
+; RV32-NEXT:    vsetivli zero, 0, e32, m8, ta, mu
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    vsetivli zero, 1, e32, m8, ta, mu
+; RV32-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_mul_v64i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    addi a1, a0, 384
+; RV64-NEXT:    vle64.v v16, (a1)
+; RV64-NEXT:    addi a1, a0, 256
+; RV64-NEXT:    addi a0, a0, 128
+; RV64-NEXT:    vle64.v v24, (a0)
+; RV64-NEXT:    vle64.v v0, (a1)
+; RV64-NEXT:    vmul.vv v16, v24, v16
+; RV64-NEXT:    vmul.vv v8, v8, v0
+; RV64-NEXT:    vmul.vv v8, v8, v16
+; RV64-NEXT:    vid.v v16
+; RV64-NEXT:    vadd.vi v24, v16, 8
+; RV64-NEXT:    vrgather.vv v0, v8, v24
+; RV64-NEXT:    vmul.vv v8, v8, v0
+; RV64-NEXT:    vadd.vi v24, v16, 4
+; RV64-NEXT:    vrgather.vv v0, v8, v24
+; RV64-NEXT:    vmul.vv v8, v8, v0
+; RV64-NEXT:    vadd.vi v16, v16, 2
+; RV64-NEXT:    vrgather.vv v24, v8, v16
+; RV64-NEXT:    vmul.vv v8, v8, v24
+; RV64-NEXT:    vrgather.vi v16, v8, 1
+; RV64-NEXT:    vmul.vv v8, v8, v16
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
+  %v = load <64 x i64>, <64 x i64>* %x
+  %red = call i64 @llvm.vector.reduce.mul.v64i64(<64 x i64> %v)
+  ret i64 %red
+}
