[llvm] 98eb241 - [RISCV] Add tests for vwaddu_wv+vabd(u) combine
via llvm-commits
llvm-commits at lists.llvm.org
Tue Mar 10 23:54:56 PDT 2026
Author: Pengcheng Wang
Date: 2026-03-11T14:54:52+08:00
New Revision: 98eb2413ba899b754db352dde3fd459be102b187
URL: https://github.com/llvm/llvm-project/commit/98eb2413ba899b754db352dde3fd459be102b187
DIFF: https://github.com/llvm/llvm-project/commit/98eb2413ba899b754db352dde3fd459be102b187.diff
LOG: [RISCV] Add tests for vwaddu_wv+vabd(u) combine
Reviewers: lukel97, topperc, preames, mgudim, mshockwave
Pull Request: https://github.com/llvm/llvm-project/pull/184962
Added:
Modified:
llvm/test/CodeGen/RISCV/rvv/abd.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/abd.ll b/llvm/test/CodeGen/RISCV/rvv/abd.ll
index c451559a29a69..b0e60c508c1c7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abd.ll
@@ -482,6 +482,112 @@ define <vscale x 4 x i32> @uabd_non_matching_promotion(<vscale x 4 x i8> %a, <vs
ret <vscale x 4 x i32> %abs
}
+define <vscale x 8 x i16> @vwabda(<vscale x 8 x i16> %acc, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: vwabda:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vmin.vv v12, v10, v11
+; CHECK-NEXT: vmax.vv v10, v10, v11
+; CHECK-NEXT: vsub.vv v10, v10, v12
+; CHECK-NEXT: vwaddu.wv v8, v8, v10
+; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vwabda:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; ZVABD-NEXT: vabd.vv v10, v10, v11
+; ZVABD-NEXT: vwaddu.wv v8, v8, v10
+; ZVABD-NEXT: ret
+ %a.sext = sext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+ %b.sext = sext <vscale x 8 x i8> %b to <vscale x 8 x i16>
+ %sub = sub <vscale x 8 x i16> %a.sext, %b.sext
+ %abs = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %sub, i1 true)
+ %ret = add <vscale x 8 x i16> %acc, %abs
+ ret <vscale x 8 x i16> %ret
+}
+
+define <vscale x 8 x i16> @vwabdau(<vscale x 8 x i16> %acc, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: vwabdau:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vminu.vv v12, v10, v11
+; CHECK-NEXT: vmaxu.vv v10, v10, v11
+; CHECK-NEXT: vsub.vv v10, v10, v12
+; CHECK-NEXT: vwaddu.wv v8, v8, v10
+; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vwabdau:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; ZVABD-NEXT: vabdu.vv v10, v10, v11
+; ZVABD-NEXT: vwaddu.wv v8, v8, v10
+; ZVABD-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i16>
+ %sub = sub <vscale x 8 x i16> %a.zext, %b.zext
+ %abs = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %sub, i1 true)
+ %ret = add <vscale x 8 x i16> %acc, %abs
+ ret <vscale x 8 x i16> %ret
+}
+
+define <vscale x 8 x i32> @vwabda_sext_diff(<vscale x 8 x i32> %acc, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: vwabda_sext_diff:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vmin.vv v14, v12, v13
+; CHECK-NEXT: vmax.vv v12, v12, v13
+; CHECK-NEXT: vsub.vv v14, v12, v14
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v12, v14
+; CHECK-NEXT: vwaddu.wv v8, v8, v12
+; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vwabda_sext_diff:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; ZVABD-NEXT: vabd.vv v14, v12, v13
+; ZVABD-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT: vzext.vf2 v12, v14
+; ZVABD-NEXT: vwaddu.wv v8, v8, v12
+; ZVABD-NEXT: ret
+ %a.sext = sext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+ %b.sext = sext <vscale x 8 x i8> %b to <vscale x 8 x i16>
+ %sub = sub <vscale x 8 x i16> %a.sext, %b.sext
+ %abs = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %sub, i1 true)
+ %abs.zext = zext <vscale x 8 x i16> %abs to <vscale x 8 x i32>
+ %ret = add <vscale x 8 x i32> %acc, %abs.zext
+ ret <vscale x 8 x i32> %ret
+}
+
+define <vscale x 8 x i32> @vwabdau_zext_diff(<vscale x 8 x i32> %acc, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: vwabdau_zext_diff:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vminu.vv v14, v12, v13
+; CHECK-NEXT: vmaxu.vv v12, v12, v13
+; CHECK-NEXT: vsub.vv v14, v12, v14
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v12, v14
+; CHECK-NEXT: vwaddu.wv v8, v8, v12
+; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vwabdau_zext_diff:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; ZVABD-NEXT: vabdu.vv v14, v12, v13
+; ZVABD-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT: vzext.vf2 v12, v14
+; ZVABD-NEXT: vwaddu.wv v8, v8, v12
+; ZVABD-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i16>
+ %sub = sub <vscale x 8 x i16> %a.zext, %b.zext
+ %abs = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %sub, i1 true)
+ %abs.zext = zext <vscale x 8 x i16> %abs to <vscale x 8 x i32>
+ %ret = add <vscale x 8 x i32> %acc, %abs.zext
+ ret <vscale x 8 x i32> %ret
+}
+
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; RV32: {{.*}}
; RV64: {{.*}}
More information about the llvm-commits
mailing list