[clang] [llvm] [Clang][RISCV] Add Zvabd intrinsics (PR #180929)

Luke Lau via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 11 06:03:33 PST 2026


================
@@ -0,0 +1,238 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV64
+
+define <vscale x 1 x i8> @vabd_vv_i8mf8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8mf8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; RV32-NEXT:    vabd.vv v8, v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabd_vv_i8mf8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; RV64-NEXT:    vabd.vv v8, v8, v9
+; RV64-NEXT:    ret
+  %res = call <vscale x 1 x i8> @llvm.riscv.vabd.vscalex1xi8.vscalex1xi8(<vscale x 1 x i8> poison, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen -1)
+  ret <vscale x 1 x i8> %res
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vabd.vscalex1xi8.vscalex1xi8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define <vscale x 2 x i8> @vabd_vv_i8mf4(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8mf4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; RV32-NEXT:    vabd.vv v8, v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabd_vv_i8mf4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; RV64-NEXT:    vabd.vv v8, v8, v9
+; RV64-NEXT:    ret
+  %res = call <vscale x 2 x i8> @llvm.riscv.vabd.vscalex2xi8.vscalex2xi8(<vscale x 2 x i8> poison, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen -1)
+  ret <vscale x 2 x i8> %res
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vabd.vscalex2xi8.vscalex2xi8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define <vscale x 4 x i8> @vabd_vv_i8mf2(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8mf2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; RV32-NEXT:    vabd.vv v8, v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabd_vv_i8mf2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; RV64-NEXT:    vabd.vv v8, v8, v9
+; RV64-NEXT:    ret
+  %res = call <vscale x 4 x i8> @llvm.riscv.vabd.vscalex4xi8.vscalex4xi8(<vscale x 4 x i8> poison, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen -1)
+  ret <vscale x 4 x i8> %res
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vabd.vscalex4xi8.vscalex4xi8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define <vscale x 8 x i8> @vabd_vv_i8m1(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8m1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; RV32-NEXT:    vabd.vv v8, v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabd_vv_i8m1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; RV64-NEXT:    vabd.vv v8, v8, v9
+; RV64-NEXT:    ret
+  %res = call <vscale x 8 x i8> @llvm.riscv.vabd.vscalex8xi8.vscalex8xi8(<vscale x 8 x i8> poison, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen -1)
+  ret <vscale x 8 x i8> %res
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vabd.vscalex8xi8.vscalex8xi8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define <vscale x 16 x i8> @vabd_vv_i8m2(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8m2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; RV32-NEXT:    vabd.vv v8, v8, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabd_vv_i8m2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; RV64-NEXT:    vabd.vv v8, v8, v10
+; RV64-NEXT:    ret
+  %res = call <vscale x 16 x i8> @llvm.riscv.vabd.vscalex16xi8.vscalex16xi8(<vscale x 16 x i8> poison, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen -1)
+  ret <vscale x 16 x i8> %res
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vabd.vscalex16xi8.vscalex16xi8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define <vscale x 32 x i8> @vabd_vv_i8m4(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8m4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; RV32-NEXT:    vabd.vv v8, v8, v12
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabd_vv_i8m4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; RV64-NEXT:    vabd.vv v8, v8, v12
+; RV64-NEXT:    ret
+  %res = call <vscale x 32 x i8> @llvm.riscv.vabd.vscalex32xi8.vscalex32xi8(<vscale x 32 x i8> poison, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen -1)
+  ret <vscale x 32 x i8> %res
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vabd.vscalex32xi8.vscalex32xi8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define <vscale x 64 x i8> @vabd_vv_i8m8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8m8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; RV32-NEXT:    vabd.vv v8, v8, v16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabd_vv_i8m8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; RV64-NEXT:    vabd.vv v8, v8, v16
+; RV64-NEXT:    ret
+  %res = call <vscale x 64 x i8> @llvm.riscv.vabd.vscalex64xi8.vscalex64xi8(<vscale x 64 x i8> poison, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b, iXLen -1)
+  ret <vscale x 64 x i8> %res
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vabd.vscalex64xi8.vscalex64xi8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
+
+define <vscale x 1 x i16> @vabd_vv_i16mf4(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b) {
+; RV32-LABEL: vabd_vv_i16mf4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT:    vabd.vv v8, v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabd_vv_i16mf4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-NEXT:    vabd.vv v8, v8, v9
+; RV64-NEXT:    ret
+  %res = call <vscale x 1 x i16> @llvm.riscv.vabd.vscalex1xi16.vscalex1xi16(<vscale x 1 x i16> poison, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen -1)
+  ret <vscale x 1 x i16> %res
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vabd.vscalex1xi16.vscalex1xi16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define <vscale x 2 x i16> @vabd_vv_i16mf2(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b) {
+; RV32-LABEL: vabd_vv_i16mf2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; RV32-NEXT:    vabd.vv v8, v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabd_vv_i16mf2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; RV64-NEXT:    vabd.vv v8, v8, v9
+; RV64-NEXT:    ret
+  %res = call <vscale x 2 x i16> @llvm.riscv.vabd.vscalex2xi16.vscalex2xi16(<vscale x 2 x i16> poison, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen -1)
+  ret <vscale x 2 x i16> %res
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vabd.vscalex2xi16.vscalex2xi16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define <vscale x 4 x i16> @vabd_vv_i16m1(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
+; RV32-LABEL: vabd_vv_i16m1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT:    vabd.vv v8, v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabd_vv_i16m1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT:    vabd.vv v8, v8, v9
+; RV64-NEXT:    ret
+  %res = call <vscale x 4 x i16> @llvm.riscv.vabd.vscalex4xi16.vscalex4xi16(<vscale x 4 x i16> poison, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1)
+  ret <vscale x 4 x i16> %res
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vabd.vscalex4xi16.vscalex4xi16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define <vscale x 8 x i16> @vabd_vv_i16m2(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; RV32-LABEL: vabd_vv_i16m2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; RV32-NEXT:    vabd.vv v8, v8, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabd_vv_i16m2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; RV64-NEXT:    vabd.vv v8, v8, v10
+; RV64-NEXT:    ret
+  %res = call <vscale x 8 x i16> @llvm.riscv.vabd.vscalex8xi16.vscalex8xi16(<vscale x 8 x i16> poison, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen -1)
+  ret <vscale x 8 x i16> %res
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vabd.vscalex8xi16.vscalex8xi16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define <vscale x 16 x i16> @vabd_vv_i16m4(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
+; RV32-LABEL: vabd_vv_i16m4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; RV32-NEXT:    vabd.vv v8, v8, v12
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabd_vv_i16m4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; RV64-NEXT:    vabd.vv v8, v8, v12
+; RV64-NEXT:    ret
+  %res = call <vscale x 16 x i16> @llvm.riscv.vabd.vscalex16xi16.vscalex16xi16(<vscale x 16 x i16> poison, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1)
----------------
lukel97 wrote:

I think the scalable vector overload suffix is written nxv16i16 rather than vscalex16xi16. But you can also just leave it out entirely:
```suggestion
  %res = call <vscale x 16 x i16> @llvm.riscv.vabd(<vscale x 16 x i16> poison, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1)
```
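For reference, a minimal sketch of both spellings (assuming the usual nxv<N><elt> mangling for scalable vector overloads; the exact list of suffix components depends on which parameters the intrinsic's TableGen definition marks as overloaded):
```llvm
; Fully mangled form: <vscale x 16 x i16> mangles to nxv16i16, so the
; suffix components here would read .nxv16i16.nxv16i16 instead of the
; .vscalex16xi16.vscalex16xi16 spelling used in the patch.
  %res = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.nxv16i16(<vscale x 16 x i16> poison, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1)

; Suffix omitted: the .ll parser infers the mangling from the call's
; argument and return types, so no separate declare is needed.
  %res2 = call <vscale x 16 x i16> @llvm.riscv.vabd(<vscale x 16 x i16> poison, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1)
```
With the unsuffixed form the explicit `declare` lines elsewhere in the test can be dropped as well.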

https://github.com/llvm/llvm-project/pull/180929
