[llvm] [RISCV] Add tests for vwaddu_wv+vabd(u) combine (PR #184962)

Pengcheng Wang via llvm-commits llvm-commits at lists.llvm.org
Thu Mar 5 23:23:32 PST 2026


https://github.com/wangpc-pp created https://github.com/llvm/llvm-project/pull/184962

None

>From e12bb1a64c7adee74eb0e8d67cf65c1417489f76 Mon Sep 17 00:00:00 2001
From: Pengcheng Wang <wangpengcheng.pp at bytedance.com>
Date: Fri, 6 Mar 2026 15:23:12 +0800
Subject: [PATCH] =?UTF-8?q?[=F0=9D=98=80=F0=9D=97=BD=F0=9D=97=BF]=20initia?=
 =?UTF-8?q?l=20version?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Created using spr 1.3.6-beta.1
---
 llvm/test/CodeGen/RISCV/rvv/abd.ll | 106 +++++++++++++++++++++++++++++
 1 file changed, 106 insertions(+)

diff --git a/llvm/test/CodeGen/RISCV/rvv/abd.ll b/llvm/test/CodeGen/RISCV/rvv/abd.ll
index c451559a29a69..1181ea4ced46c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abd.ll
@@ -482,6 +482,112 @@ define <vscale x 4 x i32> @uabd_non_matching_promotion(<vscale x 4 x i8> %a, <vs
   ret <vscale x 4 x i32> %abs
 }
 
+define <vscale x 8 x i16> @vwabda(<vscale x 8 x i16> %acc, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: vwabda:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vmin.vv v12, v10, v11
+; CHECK-NEXT:    vmax.vv v10, v10, v11
+; CHECK-NEXT:    vsub.vv v10, v10, v12
+; CHECK-NEXT:    vwaddu.wv v8, v8, v10
+; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vwabda:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; ZVABD-NEXT:    vabd.vv v10, v10, v11
+; ZVABD-NEXT:    vwaddu.wv v8, v8, v10
+; ZVABD-NEXT:    ret
+  %a.sext = sext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+  %b.sext = sext <vscale x 8 x i8> %b to <vscale x 8 x i16>
+  %sub = sub <vscale x 8 x i16> %a.sext, %b.sext
+  %abs = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %sub, i1 true)
+  %ret = add <vscale x 8 x i16> %acc, %abs
+  ret <vscale x 8 x i16> %ret
+}
+
+define <vscale x 8 x i16> @vwabdau(<vscale x 8 x i16> %acc, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: vwabdau:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vminu.vv v12, v10, v11
+; CHECK-NEXT:    vmaxu.vv v10, v10, v11
+; CHECK-NEXT:    vsub.vv v10, v10, v12
+; CHECK-NEXT:    vwaddu.wv v8, v8, v10
+; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vwabdau:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; ZVABD-NEXT:    vabdu.vv v10, v10, v11
+; ZVABD-NEXT:    vwaddu.wv v8, v8, v10
+; ZVABD-NEXT:    ret
+  %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+  %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i16>
+  %sub = sub <vscale x 8 x i16> %a.zext, %b.zext
+  %abs = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %sub, i1 true)
+  %ret = add <vscale x 8 x i16> %acc, %abs
+  ret <vscale x 8 x i16> %ret
+}
+
+define <vscale x 8 x i32> @vwabdau_zext_diff(<vscale x 8 x i32> %acc, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: vwabdau_zext_diff:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vminu.vv v14, v12, v13
+; CHECK-NEXT:    vmaxu.vv v12, v12, v13
+; CHECK-NEXT:    vsub.vv v14, v12, v14
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v12, v14
+; CHECK-NEXT:    vwaddu.wv v8, v8, v12
+; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vwabdau_zext_diff:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; ZVABD-NEXT:    vabdu.vv v14, v12, v13
+; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v12, v14
+; ZVABD-NEXT:    vwaddu.wv v8, v8, v12
+; ZVABD-NEXT:    ret
+  %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+  %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i16>
+  %sub = sub <vscale x 8 x i16> %a.zext, %b.zext
+  %abs = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %sub, i1 true)
+  %abs.zext = zext <vscale x 8 x i16> %abs to <vscale x 8 x i32>
+  %ret = add <vscale x 8 x i32> %acc, %abs.zext
+  ret <vscale x 8 x i32> %ret
+}
+
+define <vscale x 8 x i32> @vwabda_zext_diff(<vscale x 8 x i32> %acc, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: vwabda_zext_diff:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vmin.vv v14, v12, v13
+; CHECK-NEXT:    vmax.vv v12, v12, v13
+; CHECK-NEXT:    vsub.vv v14, v12, v14
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v12, v14
+; CHECK-NEXT:    vwaddu.wv v8, v8, v12
+; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vwabda_zext_diff:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; ZVABD-NEXT:    vabd.vv v14, v12, v13
+; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v12, v14
+; ZVABD-NEXT:    vwaddu.wv v8, v8, v12
+; ZVABD-NEXT:    ret
+  %a.sext = sext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+  %b.sext = sext <vscale x 8 x i8> %b to <vscale x 8 x i16>
+  %sub = sub <vscale x 8 x i16> %a.sext, %b.sext
+  %abs = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %sub, i1 true)
+  %abs.zext = zext <vscale x 8 x i16> %abs to <vscale x 8 x i32>
+  %ret = add <vscale x 8 x i32> %acc, %abs.zext
+  ret <vscale x 8 x i32> %ret
+}
+
+
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; RV32: {{.*}}
 ; RV64: {{.*}}



More information about the llvm-commits mailing list