[clang] [llvm] [WIP][RISCV] Support for Zvabd fast-track proposal (PR #124239)
Pengcheng Wang via cfe-commits
cfe-commits at lists.llvm.org
Thu Feb 6 00:17:44 PST 2025
https://github.com/wangpc-pp updated https://github.com/llvm/llvm-project/pull/124239
>From 7d89f08d036f9115aaba2f3bc370d6a094c090da Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Tue, 16 Jul 2024 16:08:16 +0800
Subject: [PATCH 1/4] [RISCV][MC] Support Zvabd instructions
Support for these instructions is added:
- Vector Single-Width Signed/Unsigned Integer Absolute Difference
- Vector Widening Signed/Unsigned Integer Absolute Difference and
Accumulate
Doc: https://bytedance.larkoffice.com/docx/DqaLdNqNao8WgZxgUJkcqIVPn7g
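As a rough illustration, here is a minimal C++ sketch of the per-element semantics described above (the helper names are ours, not part of the proposal; the authoritative definitions are in the linked doc):

#include <cstdint>

// Signed absolute difference (vabd.vv/vabd.vx): the comparison is signed;
// like the vector op, the result wraps modulo 2^SEW.
uint8_t abds(int8_t a, int8_t b) {
  return static_cast<uint8_t>(a > b ? a - b : b - a);
}

// Unsigned absolute difference (vabdu.vv/vabdu.vx).
uint8_t abdu(uint8_t a, uint8_t b) {
  return a > b ? a - b : b - a;
}

// Widening absolute difference and accumulate (vwabdacc/vwabdaccu):
// the destination holds 2*SEW elements and accumulates the difference,
// which is why the widening forms carry the earlyclobber constraint.
int16_t wabdacc(int16_t acc, int8_t a, int8_t b) {
  return static_cast<int16_t>(acc + (a > b ? a - b : b - a));
}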
---
.../Driver/print-supported-extensions-riscv.c | 1 +
.../test/Preprocessor/riscv-target-features.c | 9 ++
llvm/lib/Target/RISCV/RISCVFeatures.td | 6 +
llvm/lib/Target/RISCV/RISCVInstrInfo.td | 1 +
llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td | 28 +++++
llvm/test/CodeGen/RISCV/attributes.ll | 4 +
llvm/test/MC/RISCV/rvv/zvabd-aliases.s | 12 ++
llvm/test/MC/RISCV/rvv/zvabd-invalid.s | 18 +++
llvm/test/MC/RISCV/rvv/zvabd.s | 105 ++++++++++++++++++
.../TargetParser/RISCVISAInfoTest.cpp | 1 +
10 files changed, 185 insertions(+)
create mode 100644 llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
create mode 100644 llvm/test/MC/RISCV/rvv/zvabd-aliases.s
create mode 100644 llvm/test/MC/RISCV/rvv/zvabd-invalid.s
create mode 100644 llvm/test/MC/RISCV/rvv/zvabd.s
diff --git a/clang/test/Driver/print-supported-extensions-riscv.c b/clang/test/Driver/print-supported-extensions-riscv.c
index 3443ff0b69de9a5..b0422ffc5815987 100644
--- a/clang/test/Driver/print-supported-extensions-riscv.c
+++ b/clang/test/Driver/print-supported-extensions-riscv.c
@@ -185,6 +185,7 @@
// CHECK-NEXT: zicfilp 1.0 'Zicfilp' (Landing pad)
// CHECK-NEXT: zicfiss 1.0 'Zicfiss' (Shadow stack)
// CHECK-NEXT: zalasr 0.1 'Zalasr' (Load-Acquire and Store-Release Instructions)
+// CHECK-NEXT: zvabd 0.2 'Zvabd' (Vector Absolute Difference)
// CHECK-NEXT: zvbc32e 0.7 'Zvbc32e' (Vector Carryless Multiplication with 32-bits elements)
// CHECK-NEXT: zvkgs 0.7 'Zvkgs' (Vector-Scalar GCM instructions for Cryptography)
// CHECK-NEXT: sdext 1.0 'Sdext' (External debugger)
diff --git a/clang/test/Preprocessor/riscv-target-features.c b/clang/test/Preprocessor/riscv-target-features.c
index c21977113527575..2725c283f107d19 100644
--- a/clang/test/Preprocessor/riscv-target-features.c
+++ b/clang/test/Preprocessor/riscv-target-features.c
@@ -145,6 +145,7 @@
// CHECK-NOT: __riscv_zksh {{.*$}}
// CHECK-NOT: __riscv_zkt {{.*$}}
// CHECK-NOT: __riscv_zmmul {{.*$}}
+// CHECK-NOT: __riscv_zvabd {{.*$}}
// CHECK-NOT: __riscv_zvbb {{.*$}}
// CHECK-NOT: __riscv_zvbc {{.*$}}
// CHECK-NOT: __riscv_zve32f {{.*$}}
@@ -1504,6 +1505,14 @@
// RUN: -o - | FileCheck --check-prefix=CHECK-ZFA-EXT %s
// CHECK-ZFA-EXT: __riscv_zfa 1000000{{$}}
+// RUN: %clang --target=riscv32 -menable-experimental-extensions \
+// RUN: -march=rv32i_zve64x_zvabd0p2 -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-ZVABD-EXT %s
+// RUN: %clang --target=riscv64 -menable-experimental-extensions \
+// RUN: -march=rv64i_zve64x_zvabd0p2 -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-ZVABD-EXT %s
+// CHECK-ZVABD-EXT: __riscv_zvabd 2000{{$}}
+
// RUN: %clang --target=riscv32 \
// RUN: -march=rv32i_zve64x_zvbb1p0 -E -dM %s \
// RUN: -o - | FileCheck --check-prefix=CHECK-ZVBB-EXT %s
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index f050977c55e196a..4fa52bbd4a5c8c6 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -646,6 +646,12 @@ def FeatureStdExtV
[FeatureStdExtZvl128b, FeatureStdExtZve64d]>,
RISCVExtensionBitmask<0, 21>;
+def FeatureStdExtZvabd
+ : RISCVExperimentalExtension<0, 2, "Vector Absolute Difference">;
+def HasStdExtZvabd : Predicate<"Subtarget->hasStdExtZvabd()">,
+ AssemblerPredicate<(all_of FeatureStdExtZvabd),
+ "'Zvabd' (Vector Absolute Difference)">;
+
def FeatureStdExtZvfbfmin
: RISCVExtension<1, 0, "Vector BF16 Converts", [FeatureStdExtZve32f]>;
def HasStdExtZvfbfmin : Predicate<"Subtarget->hasStdExtZvfbfmin()">,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index fec10864f95dc62..80bb7eaa8ffe2d8 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -2114,6 +2114,7 @@ include "RISCVInstrInfoZk.td"
// Vector
include "RISCVInstrInfoV.td"
include "RISCVInstrInfoZvk.td"
+include "RISCVInstrInfoZvabd.td"
// Compressed
include "RISCVInstrInfoC.td"
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
new file mode 100644
index 000000000000000..36b5711d1d0aecb
--- /dev/null
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
@@ -0,0 +1,28 @@
+//===-- RISCVInstrInfoZvabd.td - 'Zvabd' instructions ------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// This file describes the RISC-V instructions for 'Zvabd' (Vector Absolute
+/// Difference).
+///
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Instruction Definitions
+//===----------------------------------------------------------------------===//
+let Predicates = [HasStdExtZvabd] in {
+ defm VABD_V : VAALU_MV_V_X<"vabd", 0b010001>;
+ defm VABDU_V : VAALU_MV_V_X<"vabdu", 0b010011>;
+
+ let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
+ defm VWABDACC_V : VALU_MV_V_X<"vwabdacc", 0b010101, "v">;
+ defm VWABDACCU_V : VALU_MV_V_X<"vwabdaccu", 0b010110, "v">;
+ } // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV
+
+ def : InstAlias<"vabs.v $vd, $vs$vm", (VABD_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
+ def : InstAlias<"vabs.v $vd, $vs", (VABD_VX VR:$vd, VR:$vs, X0, zero_reg)>;
+} // Predicates = [HasStdExtZvabd]
diff --git a/llvm/test/CodeGen/RISCV/attributes.ll b/llvm/test/CodeGen/RISCV/attributes.ll
index b9d5bf0a7227cde..9b0c9aef550d63b 100644
--- a/llvm/test/CodeGen/RISCV/attributes.ll
+++ b/llvm/test/CodeGen/RISCV/attributes.ll
@@ -121,6 +121,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+zve32x -mattr=+zvksh %s -o - | FileCheck --check-prefix=RV32ZVKSH %s
; RUN: llc -mtriple=riscv32 -mattr=+zve32x -mattr=+zvkt %s -o - | FileCheck --check-prefix=RV32ZVKT %s
; RUN: llc -mtriple=riscv32 -mattr=+zvfh %s -o - | FileCheck --check-prefix=RV32ZVFH %s
+; RUN: llc -mtriple=riscv32 -mattr=+zve32x -mattr=+experimental-zvabd %s -o - | FileCheck --check-prefix=RV32ZVABD %s
; RUN: llc -mtriple=riscv32 -mattr=+zicond %s -o - | FileCheck --check-prefix=RV32ZICOND %s
; RUN: llc -mtriple=riscv32 -mattr=+zimop %s -o - | FileCheck --check-prefix=RV32ZIMOP %s
; RUN: llc -mtriple=riscv32 -mattr=+zcmop %s -o - | FileCheck --check-prefix=RV32ZCMOP %s
@@ -270,6 +271,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+zve32x -mattr=+zvksh %s -o - | FileCheck --check-prefix=RV64ZVKSH %s
; RUN: llc -mtriple=riscv64 -mattr=+zve32x -mattr=+zvkt %s -o - | FileCheck --check-prefix=RV64ZVKT %s
; RUN: llc -mtriple=riscv64 -mattr=+zvfh %s -o - | FileCheck --check-prefix=RV64ZVFH %s
+; RUN: llc -mtriple=riscv64 -mattr=+zve32x -mattr=+experimental-zvabd %s -o - | FileCheck --check-prefix=RV64ZVABD %s
; RUN: llc -mtriple=riscv64 -mattr=+zicond %s -o - | FileCheck --check-prefix=RV64ZICOND %s
; RUN: llc -mtriple=riscv64 -mattr=+zimop %s -o - | FileCheck --check-prefix=RV64ZIMOP %s
; RUN: llc -mtriple=riscv64 -mattr=+zcmop %s -o - | FileCheck --check-prefix=RV64ZCMOP %s
@@ -437,6 +439,7 @@
; RV32ZVKSH: .attribute 5, "rv32i2p1_zicsr2p0_zve32x1p0_zvksh1p0_zvl32b1p0"
; RV32ZVKT: .attribute 5, "rv32i2p1_zicsr2p0_zve32x1p0_zvkt1p0_zvl32b1p0"
; RV32ZVFH: .attribute 5, "rv32i2p1_f2p2_zicsr2p0_zfhmin1p0_zve32f1p0_zve32x1p0_zvfh1p0_zvfhmin1p0_zvl32b1p0"
+; RV32ZVABD: .attribute 5, "rv32i2p1_zicsr2p0_zvabd0p2_zve32x1p0_zvl32b1p0"
; RV32ZICOND: .attribute 5, "rv32i2p1_zicond1p0"
; RV32ZIMOP: .attribute 5, "rv32i2p1_zimop1p0"
; RV32ZCMOP: .attribute 5, "rv32i2p1_zca1p0_zcmop1p0"
@@ -584,6 +587,7 @@
; RV64ZVKSH: .attribute 5, "rv64i2p1_zicsr2p0_zve32x1p0_zvksh1p0_zvl32b1p0"
; RV64ZVKT: .attribute 5, "rv64i2p1_zicsr2p0_zve32x1p0_zvkt1p0_zvl32b1p0"
; RV64ZVFH: .attribute 5, "rv64i2p1_f2p2_zicsr2p0_zfhmin1p0_zve32f1p0_zve32x1p0_zvfh1p0_zvfhmin1p0_zvl32b1p0"
+; RV64ZVABD: .attribute 5, "rv64i2p1_zicsr2p0_zvabd0p2_zve32x1p0_zvl32b1p0"
; RV64ZICOND: .attribute 5, "rv64i2p1_zicond1p0"
; RV64ZIMOP: .attribute 5, "rv64i2p1_zimop1p0"
; RV64ZCMOP: .attribute 5, "rv64i2p1_zca1p0_zcmop1p0"
diff --git a/llvm/test/MC/RISCV/rvv/zvabd-aliases.s b/llvm/test/MC/RISCV/rvv/zvabd-aliases.s
new file mode 100644
index 000000000000000..e6a87f38d75d0bf
--- /dev/null
+++ b/llvm/test/MC/RISCV/rvv/zvabd-aliases.s
@@ -0,0 +1,12 @@
+# RUN: llvm-mc --triple=riscv64 -mattr=+v,+experimental-zvabd < %s --show-encoding 2>&1 \
+# RUN: | FileCheck --check-prefix=ALIAS %s
+# RUN: llvm-mc --triple=riscv64 -mattr=+v,+experimental-zvabd --M no-aliases < %s --show-encoding 2>&1 \
+# RUN: | FileCheck --check-prefix=NO-ALIAS %s
+
+# ALIAS: vabs.v v2, v1 # encoding: [0x57,0x61,0x10,0x46]
+# NO-ALIAS: vabd.vx v2, v1, zero # encoding: [0x57,0x61,0x10,0x46]
+vabs.v v2, v1
+
+# ALIAS: vabs.v v2, v1, v0.t # encoding: [0x57,0x61,0x10,0x44]
+# NO-ALIAS: vabd.vx v2, v1, zero, v0.t # encoding: [0x57,0x61,0x10,0x44]
+vabs.v v2, v1, v0.t
diff --git a/llvm/test/MC/RISCV/rvv/zvabd-invalid.s b/llvm/test/MC/RISCV/rvv/zvabd-invalid.s
new file mode 100644
index 000000000000000..da9184364020aba
--- /dev/null
+++ b/llvm/test/MC/RISCV/rvv/zvabd-invalid.s
@@ -0,0 +1,18 @@
+# RUN: not llvm-mc -triple=riscv64 --mattr=+zve64x --mattr=+experimental-zvabd %s 2>&1 \
+# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
+
+vwabdacc.vv v9, v9, v8
+# CHECK-ERROR: [[@LINE-1]]:13: error: the destination vector register group cannot overlap the source vector register group
+# CHECK-ERROR-LABEL: vwabdacc.vv v9, v9, v8
+
+vwabdacc.vx v9, v9, a0
+# CHECK-ERROR: [[@LINE-1]]:13: error: the destination vector register group cannot overlap the source vector register group
+# CHECK-ERROR-LABEL: vwabdacc.vx v9, v9, a0
+
+vwabdaccu.vv v9, v9, v8
+# CHECK-ERROR: [[@LINE-1]]:14: error: the destination vector register group cannot overlap the source vector register group
+# CHECK-ERROR-LABEL: vwabdaccu.vv v9, v9, v8
+
+vwabdaccu.vx v9, v9, a0
+# CHECK-ERROR: [[@LINE-1]]:14: error: the destination vector register group cannot overlap the source vector register group
+# CHECK-ERROR-LABEL: vwabdaccu.vx v9, v9, a0
diff --git a/llvm/test/MC/RISCV/rvv/zvabd.s b/llvm/test/MC/RISCV/rvv/zvabd.s
new file mode 100644
index 000000000000000..d765e01c52081b3
--- /dev/null
+++ b/llvm/test/MC/RISCV/rvv/zvabd.s
@@ -0,0 +1,105 @@
+# RUN: llvm-mc -triple=riscv32 -show-encoding --mattr=+v --mattr=+experimental-zvabd %s \
+# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
+# RUN: not llvm-mc -triple=riscv32 -show-encoding %s 2>&1 \
+# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
+# RUN: llvm-mc -triple=riscv32 -filetype=obj --mattr=+v --mattr=+experimental-zvabd %s \
+# RUN: | llvm-objdump -d --mattr=+v --mattr=+experimental-zvabd --no-print-imm-hex - \
+# RUN: | FileCheck %s --check-prefix=CHECK-INST
+# RUN: llvm-mc -triple=riscv32 -filetype=obj --mattr=+v --mattr=+experimental-zvabd %s \
+# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+
+vabd.vv v10, v9, v8
+# CHECK-INST: vabd.vv v10, v9, v8
+# CHECK-ENCODING: [0x57,0x25,0x94,0x46]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 46942557 <unknown>
+
+vabd.vv v10, v9, v8, v0.t
+# CHECK-INST: vabd.vv v10, v9, v8, v0.t
+# CHECK-ENCODING: [0x57,0x25,0x94,0x44]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 44942557 <unknown>
+
+vabd.vx v10, v9, a0
+# CHECK-INST: vabd.vx v10, v9, a0
+# CHECK-ENCODING: [0x57,0x65,0x95,0x46]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 46956557 <unknown>
+
+vabd.vx v10, v9, a0, v0.t
+# CHECK-INST: vabd.vx v10, v9, a0, v0.t
+# CHECK-ENCODING: [0x57,0x65,0x95,0x44]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 44956557 <unknown>
+
+vabdu.vv v10, v9, v8
+# CHECK-INST: vabdu.vv v10, v9, v8
+# CHECK-ENCODING: [0x57,0x25,0x94,0x4e]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 4e942557 <unknown>
+
+vabdu.vv v10, v9, v8, v0.t
+# CHECK-INST: vabdu.vv v10, v9, v8, v0.t
+# CHECK-ENCODING: [0x57,0x25,0x94,0x4c]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 4c942557 <unknown>
+
+vabdu.vx v10, v9, a0
+# CHECK-INST: vabdu.vx v10, v9, a0
+# CHECK-ENCODING: [0x57,0x65,0x95,0x4e]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 4e956557 <unknown>
+
+vabdu.vx v10, v9, a0, v0.t
+# CHECK-INST: vabdu.vx v10, v9, a0, v0.t
+# CHECK-ENCODING: [0x57,0x65,0x95,0x4c]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 4c956557 <unknown>
+
+vwabdacc.vv v10, v9, v8
+# CHECK-INST: vwabdacc.vv v10, v9, v8
+# CHECK-ENCODING: [0x57,0x25,0x94,0x56]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 56942557 <unknown>
+
+vwabdacc.vv v10, v9, v8, v0.t
+# CHECK-INST: vwabdacc.vv v10, v9, v8, v0.t
+# CHECK-ENCODING: [0x57,0x25,0x94,0x54]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 54942557 <unknown>
+
+vwabdacc.vx v10, v9, a0
+# CHECK-INST: vwabdacc.vx v10, v9, a0
+# CHECK-ENCODING: [0x57,0x65,0x95,0x56]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 56956557 <unknown>
+
+vwabdacc.vx v10, v9, a0, v0.t
+# CHECK-INST: vwabdacc.vx v10, v9, a0, v0.t
+# CHECK-ENCODING: [0x57,0x65,0x95,0x54]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 54956557 <unknown>
+
+vwabdaccu.vv v10, v9, v8
+# CHECK-INST: vwabdaccu.vv v10, v9, v8
+# CHECK-ENCODING: [0x57,0x25,0x94,0x5a]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 5a942557 <unknown>
+
+vwabdaccu.vv v10, v9, v8, v0.t
+# CHECK-INST: vwabdaccu.vv v10, v9, v8, v0.t
+# CHECK-ENCODING: [0x57,0x25,0x94,0x58]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 58942557 <unknown>
+
+vwabdaccu.vx v10, v9, a0
+# CHECK-INST: vwabdaccu.vx v10, v9, a0
+# CHECK-ENCODING: [0x57,0x65,0x95,0x5a]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 5a956557 <unknown>
+
+vwabdaccu.vx v10, v9, a0, v0.t
+# CHECK-INST: vwabdaccu.vx v10, v9, a0, v0.t
+# CHECK-ENCODING: [0x57,0x65,0x95,0x58]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 58956557 <unknown>
diff --git a/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp b/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp
index 7ebfcf915a7c5e0..a4bd576ae77e93e 100644
--- a/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp
+++ b/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp
@@ -1111,6 +1111,7 @@ Experimental extensions
zicfilp 1.0 This is a long dummy description
zicfiss 1.0
zalasr 0.1
+ zvabd 0.2
zvbc32e 0.7
zvkgs 0.7
sdext 1.0
>From bd082e34e5f32cf6f21f7f0b0527c52274bef533 Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Fri, 24 Jan 2025 15:55:53 +0800
Subject: [PATCH 2/4] [RISCV][CodeGen] Lower abds/abdu to Zvabd instructions
We directly lower `ISD::ABDS`/`ISD::ABDU` to Zvabd instructions.
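As a hedged example (the exact IR the vectorizer emits may differ by pipeline), a source loop like the one below is canonicalized to ISD::ABDU, and with Zvabd enabled it should now select vabdu.vv instead of the vminu/vmaxu/vsub expansion:

#include <cstddef>
#include <cstdint>

// Widen, subtract, take the absolute value, truncate back: the idiom the
// DAG combiner recognizes as an unsigned absolute difference.
void uabd_loop(uint8_t *dst, const uint8_t *a, const uint8_t *b, size_t n) {
  for (size_t i = 0; i < n; ++i) {
    int d = int(a[i]) - int(b[i]);
    dst[i] = uint8_t(d < 0 ? -d : d);
  }
}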
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 16 +-
llvm/lib/Target/RISCV/RISCVISelLowering.h | 4 +
.../Target/RISCV/RISCVInstrInfoVPseudos.td | 10 +-
llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td | 24 ++
llvm/test/CodeGen/RISCV/rvv/abd.ll | 132 ++++++++
.../CodeGen/RISCV/rvv/fixed-vectors-abd.ll | 284 ++++++++++++++++++
.../CodeGen/RISCV/rvv/fixed-vectors-sad.ll | 83 +++++
7 files changed, 545 insertions(+), 8 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 6a0a5aa4ba415ef..d273e3c805bd970 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -833,7 +833,10 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, VT,
Legal);
- setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Custom);
+ if (Subtarget.hasStdExtZvabd())
+ setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Legal);
+ else
+ setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Custom);
// Custom-lower extensions and truncations from/to mask types.
setOperationAction({ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND},
@@ -6451,6 +6454,8 @@ static unsigned getRISCVVLOp(SDValue Op) {
OP_CASE(SMAX)
OP_CASE(UMIN)
OP_CASE(UMAX)
+ OP_CASE(ABDS)
+ OP_CASE(ABDU)
OP_CASE(STRICT_FADD)
OP_CASE(STRICT_FSUB)
OP_CASE(STRICT_FMUL)
@@ -6553,7 +6558,7 @@ static bool hasPassthruOp(unsigned Opcode) {
Opcode <= RISCVISD::LAST_STRICTFP_OPCODE &&
"not a RISC-V target specific op");
static_assert(
- RISCVISD::LAST_VL_VECTOR_OP - RISCVISD::FIRST_VL_VECTOR_OP == 127 &&
+ RISCVISD::LAST_VL_VECTOR_OP - RISCVISD::FIRST_VL_VECTOR_OP == 129 &&
RISCVISD::LAST_STRICTFP_OPCODE - RISCVISD::FIRST_STRICTFP_OPCODE == 21 &&
"adding target specific op should update this function");
if (Opcode >= RISCVISD::ADD_VL && Opcode <= RISCVISD::VFMAX_VL)
@@ -6577,7 +6582,7 @@ static bool hasMaskOp(unsigned Opcode) {
Opcode <= RISCVISD::LAST_STRICTFP_OPCODE &&
"not a RISC-V target specific op");
static_assert(
- RISCVISD::LAST_VL_VECTOR_OP - RISCVISD::FIRST_VL_VECTOR_OP == 127 &&
+ RISCVISD::LAST_VL_VECTOR_OP - RISCVISD::FIRST_VL_VECTOR_OP == 129 &&
RISCVISD::LAST_STRICTFP_OPCODE - RISCVISD::FIRST_STRICTFP_OPCODE == 21 &&
"adding target specific op should update this function");
if (Opcode >= RISCVISD::TRUNCATE_VECTOR_VL && Opcode <= RISCVISD::SETCC_VL)
@@ -7581,6 +7586,9 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
return lowerToScalableOp(Op, DAG);
case ISD::ABDS:
case ISD::ABDU: {
+ if (Subtarget.hasStdExtZvabd())
+ return lowerToScalableOp(Op, DAG);
+
SDLoc dl(Op);
EVT VT = Op->getValueType(0);
SDValue LHS = DAG.getFreeze(Op->getOperand(0));
@@ -21317,6 +21325,8 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(VZEXT_VL)
NODE_NAME_CASE(VCPOP_VL)
NODE_NAME_CASE(VFIRST_VL)
+ NODE_NAME_CASE(ABDS_VL)
+ NODE_NAME_CASE(ABDU_VL)
NODE_NAME_CASE(READ_CSR)
NODE_NAME_CASE(WRITE_CSR)
NODE_NAME_CASE(SWAP_CSR)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index e9dd8ff96fa37b4..bacbd7c2040fc94 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -282,6 +282,10 @@ enum NodeType : unsigned {
UMIN_VL,
UMAX_VL,
+ // Vector Absolute Difference.
+ ABDS_VL,
+ ABDU_VL,
+
BITREVERSE_VL,
BSWAP_VL,
CTLZ_VL,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 268bfe70673a2ac..4bf5ba1edea801d 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -2850,11 +2850,11 @@ multiclass VPseudoVFRDIV_VF_RM {
}
}
-multiclass VPseudoVALU_VV_VX {
- foreach m = MxList in {
- defm "" : VPseudoBinaryV_VV<m>,
- SchedBinary<"WriteVIALUV", "ReadVIALUV", "ReadVIALUV", m.MX,
- forcePassthruRead=true>;
+multiclass VPseudoVALU_VV_VX<bit Commutable = 0> {
+ foreach m = MxList in {
+ defm "" : VPseudoBinaryV_VV<m, Commutable = Commutable>,
+ SchedBinary<"WriteVIALUV", "ReadVIALUV", "ReadVIALUV", m.MX,
+ forcePassthruRead = true>;
defm "" : VPseudoBinaryV_VX<m>,
SchedBinary<"WriteVIALUX", "ReadVIALUV", "ReadVIALUX", m.MX,
forcePassthruRead=true>;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
index 36b5711d1d0aecb..d8a9d7f952e1d33 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
@@ -26,3 +26,27 @@ let Predicates = [HasStdExtZvabd] in {
def : InstAlias<"vabs.v $vd, $vs$vm", (VABD_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vabs.v $vd, $vs", (VABD_VX VR:$vd, VR:$vs, X0, zero_reg)>;
} // Predicates = [HasStdExtZvabd]
+
+//===----------------------------------------------------------------------===//
+// Pseudos
+//===----------------------------------------------------------------------===//
+let Predicates = [HasStdExtZvabd] in {
+ defm PseudoVABD : VPseudoVALU_VV_VX<Commutable = 1>;
+ defm PseudoVABDU : VPseudoVALU_VV_VX<Commutable = 1>;
+} // Predicates = [HasStdExtZvabd]
+
+//===----------------------------------------------------------------------===//
+// CodeGen Patterns
+//===----------------------------------------------------------------------===//
+def riscv_abds_vl
+ : SDNode<"RISCVISD::ABDS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
+def riscv_abdu_vl
+ : SDNode<"RISCVISD::ABDU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
+
+let Predicates = [HasStdExtZvabd] in {
+defm : VPatBinarySDNode_VV_VX<abds, "PseudoVABD">;
+defm : VPatBinarySDNode_VV_VX<abdu, "PseudoVABDU">;
+
+defm : VPatBinaryVL_VV_VX<riscv_abds_vl, "PseudoVABD">;
+defm : VPatBinaryVL_VV_VX<riscv_abdu_vl, "PseudoVABDU">;
+} // Predicates = [HasStdExtZvabd]
diff --git a/llvm/test/CodeGen/RISCV/rvv/abd.ll b/llvm/test/CodeGen/RISCV/rvv/abd.ll
index 583d872238df710..0008e555e7cff8f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abd.ll
@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+experimental-zvabd -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD,ZVABD-RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+experimental-zvabd -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD,ZVABD-RV64
;
; SABD
@@ -14,6 +16,12 @@ define <vscale x 16 x i8> @sabd_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
; CHECK-NEXT: vmax.vv v8, v8, v10
; CHECK-NEXT: vsub.vv v8, v8, v12
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sabd_b:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; ZVABD-NEXT: vabd.vv v8, v8, v10
+; ZVABD-NEXT: ret
%a.sext = sext <vscale x 16 x i8> %a to <vscale x 16 x i16>
%b.sext = sext <vscale x 16 x i8> %b to <vscale x 16 x i16>
%sub = sub <vscale x 16 x i16> %a.sext, %b.sext
@@ -30,6 +38,14 @@ define <vscale x 16 x i8> @sabd_b_promoted_ops(<vscale x 16 x i1> %a, <vscale x
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sabd_b_promoted_ops:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; ZVABD-NEXT: vmxor.mm v0, v0, v8
+; ZVABD-NEXT: vmv.v.i v8, 0
+; ZVABD-NEXT: vmerge.vim v8, v8, 1, v0
+; ZVABD-NEXT: ret
%a.sext = sext <vscale x 16 x i1> %a to <vscale x 16 x i8>
%b.sext = sext <vscale x 16 x i1> %b to <vscale x 16 x i8>
%sub = sub <vscale x 16 x i8> %a.sext, %b.sext
@@ -45,6 +61,12 @@ define <vscale x 8 x i16> @sabd_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
; CHECK-NEXT: vmax.vv v8, v8, v10
; CHECK-NEXT: vsub.vv v8, v8, v12
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sabd_h:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVABD-NEXT: vabd.vv v8, v8, v10
+; ZVABD-NEXT: ret
%a.sext = sext <vscale x 8 x i16> %a to <vscale x 8 x i32>
%b.sext = sext <vscale x 8 x i16> %b to <vscale x 8 x i32>
%sub = sub <vscale x 8 x i32> %a.sext, %b.sext
@@ -63,6 +85,14 @@ define <vscale x 8 x i16> @sabd_h_promoted_ops(<vscale x 8 x i8> %a, <vscale x 8
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sabd_h_promoted_ops:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; ZVABD-NEXT: vabd.vv v10, v8, v9
+; ZVABD-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT: vzext.vf2 v8, v10
+; ZVABD-NEXT: ret
%a.sext = sext <vscale x 8 x i8> %a to <vscale x 8 x i16>
%b.sext = sext <vscale x 8 x i8> %b to <vscale x 8 x i16>
%sub = sub <vscale x 8 x i16> %a.sext, %b.sext
@@ -78,6 +108,12 @@ define <vscale x 4 x i32> @sabd_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
; CHECK-NEXT: vmax.vv v8, v8, v10
; CHECK-NEXT: vsub.vv v8, v8, v12
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sabd_s:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; ZVABD-NEXT: vabd.vv v8, v8, v10
+; ZVABD-NEXT: ret
%a.sext = sext <vscale x 4 x i32> %a to <vscale x 4 x i64>
%b.sext = sext <vscale x 4 x i32> %b to <vscale x 4 x i64>
%sub = sub <vscale x 4 x i64> %a.sext, %b.sext
@@ -96,6 +132,14 @@ define <vscale x 4 x i32> @sabd_s_promoted_ops(<vscale x 4 x i16> %a, <vscale x
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sabd_s_promoted_ops:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVABD-NEXT: vabd.vv v10, v8, v9
+; ZVABD-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVABD-NEXT: vzext.vf2 v8, v10
+; ZVABD-NEXT: ret
%a.sext = sext <vscale x 4 x i16> %a to <vscale x 4 x i32>
%b.sext = sext <vscale x 4 x i16> %b to <vscale x 4 x i32>
%sub = sub <vscale x 4 x i32> %a.sext, %b.sext
@@ -129,6 +173,14 @@ define <vscale x 2 x i64> @sabd_d_promoted_ops(<vscale x 2 x i32> %a, <vscale x
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sabd_d_promoted_ops:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; ZVABD-NEXT: vabd.vv v10, v8, v9
+; ZVABD-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; ZVABD-NEXT: vzext.vf2 v8, v10
+; ZVABD-NEXT: ret
%a.sext = sext <vscale x 2 x i32> %a to <vscale x 2 x i64>
%b.sext = sext <vscale x 2 x i32> %b to <vscale x 2 x i64>
%sub = sub <vscale x 2 x i64> %a.sext, %b.sext
@@ -148,6 +200,12 @@ define <vscale x 16 x i8> @uabd_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
; CHECK-NEXT: vmaxu.vv v8, v8, v10
; CHECK-NEXT: vsub.vv v8, v8, v12
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: uabd_b:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; ZVABD-NEXT: vabdu.vv v8, v8, v10
+; ZVABD-NEXT: ret
%a.zext = zext <vscale x 16 x i8> %a to <vscale x 16 x i16>
%b.zext = zext <vscale x 16 x i8> %b to <vscale x 16 x i16>
%sub = sub <vscale x 16 x i16> %a.zext, %b.zext
@@ -164,6 +222,14 @@ define <vscale x 16 x i8> @uabd_b_promoted_ops(<vscale x 16 x i1> %a, <vscale x
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: uabd_b_promoted_ops:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; ZVABD-NEXT: vmxor.mm v0, v0, v8
+; ZVABD-NEXT: vmv.v.i v8, 0
+; ZVABD-NEXT: vmerge.vim v8, v8, 1, v0
+; ZVABD-NEXT: ret
%a.zext = zext <vscale x 16 x i1> %a to <vscale x 16 x i8>
%b.zext = zext <vscale x 16 x i1> %b to <vscale x 16 x i8>
%sub = sub <vscale x 16 x i8> %a.zext, %b.zext
@@ -179,6 +245,12 @@ define <vscale x 8 x i16> @uabd_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
; CHECK-NEXT: vmaxu.vv v8, v8, v10
; CHECK-NEXT: vsub.vv v8, v8, v12
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: uabd_h:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVABD-NEXT: vabdu.vv v8, v8, v10
+; ZVABD-NEXT: ret
%a.zext = zext <vscale x 8 x i16> %a to <vscale x 8 x i32>
%b.zext = zext <vscale x 8 x i16> %b to <vscale x 8 x i32>
%sub = sub <vscale x 8 x i32> %a.zext, %b.zext
@@ -197,6 +269,14 @@ define <vscale x 8 x i16> @uabd_h_promoted_ops(<vscale x 8 x i8> %a, <vscale x 8
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: uabd_h_promoted_ops:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; ZVABD-NEXT: vabdu.vv v10, v8, v9
+; ZVABD-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT: vzext.vf2 v8, v10
+; ZVABD-NEXT: ret
%a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i16>
%b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i16>
%sub = sub <vscale x 8 x i16> %a.zext, %b.zext
@@ -212,6 +292,12 @@ define <vscale x 4 x i32> @uabd_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
; CHECK-NEXT: vmaxu.vv v8, v8, v10
; CHECK-NEXT: vsub.vv v8, v8, v12
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: uabd_s:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; ZVABD-NEXT: vabdu.vv v8, v8, v10
+; ZVABD-NEXT: ret
%a.zext = zext <vscale x 4 x i32> %a to <vscale x 4 x i64>
%b.zext = zext <vscale x 4 x i32> %b to <vscale x 4 x i64>
%sub = sub <vscale x 4 x i64> %a.zext, %b.zext
@@ -230,6 +316,14 @@ define <vscale x 4 x i32> @uabd_s_promoted_ops(<vscale x 4 x i16> %a, <vscale x
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: uabd_s_promoted_ops:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVABD-NEXT: vabdu.vv v10, v8, v9
+; ZVABD-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVABD-NEXT: vzext.vf2 v8, v10
+; ZVABD-NEXT: ret
%a.zext = zext <vscale x 4 x i16> %a to <vscale x 4 x i32>
%b.zext = zext <vscale x 4 x i16> %b to <vscale x 4 x i32>
%sub = sub <vscale x 4 x i32> %a.zext, %b.zext
@@ -263,6 +357,14 @@ define <vscale x 2 x i64> @uabd_d_promoted_ops(<vscale x 2 x i32> %a, <vscale x
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: uabd_d_promoted_ops:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; ZVABD-NEXT: vabdu.vv v10, v8, v9
+; ZVABD-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; ZVABD-NEXT: vzext.vf2 v8, v10
+; ZVABD-NEXT: ret
%a.zext = zext <vscale x 2 x i32> %a to <vscale x 2 x i64>
%b.zext = zext <vscale x 2 x i32> %b to <vscale x 2 x i64>
%sub = sub <vscale x 2 x i64> %a.zext, %b.zext
@@ -281,6 +383,13 @@ define <vscale x 4 x i32> @uabd_non_matching_extension(<vscale x 4 x i32> %a, <v
; CHECK-NEXT: vmaxu.vv v8, v8, v12
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: uabd_non_matching_extension:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; ZVABD-NEXT: vzext.vf4 v12, v10
+; ZVABD-NEXT: vabdu.vv v8, v8, v12
+; ZVABD-NEXT: ret
%a.zext = zext <vscale x 4 x i32> %a to <vscale x 4 x i64>
%b.zext = zext <vscale x 4 x i8> %b to <vscale x 4 x i64>
%sub = sub <vscale x 4 x i64> %a.zext, %b.zext
@@ -302,6 +411,15 @@ define <vscale x 4 x i32> @uabd_non_matching_promoted_ops(<vscale x 4 x i8> %a,
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: uabd_non_matching_promoted_ops:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVABD-NEXT: vzext.vf2 v10, v8
+; ZVABD-NEXT: vabdu.vv v10, v10, v9
+; ZVABD-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVABD-NEXT: vzext.vf2 v8, v10
+; ZVABD-NEXT: ret
%a.zext = zext <vscale x 4 x i8> %a to <vscale x 4 x i32>
%b.zext = zext <vscale x 4 x i16> %b to <vscale x 4 x i32>
%sub = sub <vscale x 4 x i32> %a.zext, %b.zext
@@ -323,6 +441,18 @@ define <vscale x 4 x i32> @uabd_non_matching_promotion(<vscale x 4 x i8> %a, <vs
; CHECK-NEXT: vrsub.vi v8, v10, 0
; CHECK-NEXT: vmax.vv v8, v10, v8
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: uabd_non_matching_promotion:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; ZVABD-NEXT: vzext.vf4 v10, v8
+; ZVABD-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVABD-NEXT: vsext.vf2 v8, v9
+; ZVABD-NEXT: vwsub.wv v10, v10, v8
+; ZVABD-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVABD-NEXT: vrsub.vi v8, v10, 0
+; ZVABD-NEXT: vmax.vv v8, v10, v8
+; ZVABD-NEXT: ret
%a.zext = zext <vscale x 4 x i8> %a to <vscale x 4 x i32>
%b.zext = sext <vscale x 4 x i8> %b to <vscale x 4 x i32>
%sub = sub <vscale x 4 x i32> %a.zext, %b.zext
@@ -345,3 +475,5 @@ declare <vscale x 2 x i128> @llvm.abs.nxv2i128(<vscale x 2 x i128>, i1)
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; RV32: {{.*}}
; RV64: {{.*}}
+; ZVABD-RV32: {{.*}}
+; ZVABD-RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
index bd1209a17b53456..f61c17e18b0227f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+experimental-zvabd -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD,ZVABD-RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+experimental-zvabd -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD,ZVABD-RV64
;
; SABD
;
@@ -14,6 +16,12 @@ define <8 x i8> @sabd_8b_as_16b(<8 x i8> %a, <8 x i8> %b) {
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sabd_8b_as_16b:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; ZVABD-NEXT: vabd.vv v8, v8, v9
+; ZVABD-NEXT: ret
%a.sext = sext <8 x i8> %a to <8 x i16>
%b.sext = sext <8 x i8> %b to <8 x i16>
%sub = sub <8 x i16> %a.sext, %b.sext
@@ -31,6 +39,12 @@ define <8 x i8> @sabd_8b_as_32b(<8 x i8> %a, <8 x i8> %b) {
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sabd_8b_as_32b:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; ZVABD-NEXT: vabd.vv v8, v8, v9
+; ZVABD-NEXT: ret
%a.sext = sext <8 x i8> %a to <8 x i32>
%b.sext = sext <8 x i8> %b to <8 x i32>
%sub = sub <8 x i32> %a.sext, %b.sext
@@ -48,6 +62,12 @@ define <16 x i8> @sabd_16b(<16 x i8> %a, <16 x i8> %b) {
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sabd_16b:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT: vabd.vv v8, v8, v9
+; ZVABD-NEXT: ret
%a.sext = sext <16 x i8> %a to <16 x i16>
%b.sext = sext <16 x i8> %b to <16 x i16>
%sub = sub <16 x i16> %a.sext, %b.sext
@@ -65,6 +85,12 @@ define <4 x i16> @sabd_4h(<4 x i16> %a, <4 x i16> %b) {
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sabd_4h:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; ZVABD-NEXT: vabd.vv v8, v8, v9
+; ZVABD-NEXT: ret
%a.sext = sext <4 x i16> %a to <4 x i32>
%b.sext = sext <4 x i16> %b to <4 x i32>
%sub = sub <4 x i32> %a.sext, %b.sext
@@ -84,6 +110,14 @@ define <4 x i16> @sabd_4h_promoted_ops(<4 x i8> %a, <4 x i8> %b) {
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vzext.vf2 v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sabd_4h_promoted_ops:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; ZVABD-NEXT: vabd.vv v9, v8, v9
+; ZVABD-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVABD-NEXT: vzext.vf2 v8, v9
+; ZVABD-NEXT: ret
%a.sext = sext <4 x i8> %a to <4 x i16>
%b.sext = sext <4 x i8> %b to <4 x i16>
%sub = sub <4 x i16> %a.sext, %b.sext
@@ -100,6 +134,12 @@ define <8 x i16> @sabd_8h(<8 x i16> %a, <8 x i16> %b) {
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sabd_8h:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZVABD-NEXT: vabd.vv v8, v8, v9
+; ZVABD-NEXT: ret
%a.sext = sext <8 x i16> %a to <8 x i32>
%b.sext = sext <8 x i16> %b to <8 x i32>
%sub = sub <8 x i32> %a.sext, %b.sext
@@ -119,6 +159,14 @@ define <8 x i16> @sabd_8h_promoted_ops(<8 x i8> %a, <8 x i8> %b) {
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT: vzext.vf2 v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sabd_8h_promoted_ops:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; ZVABD-NEXT: vabd.vv v9, v8, v9
+; ZVABD-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVABD-NEXT: vzext.vf2 v8, v9
+; ZVABD-NEXT: ret
%a.sext = sext <8 x i8> %a to <8 x i16>
%b.sext = sext <8 x i8> %b to <8 x i16>
%sub = sub <8 x i16> %a.sext, %b.sext
@@ -135,6 +183,12 @@ define <2 x i32> @sabd_2s(<2 x i32> %a, <2 x i32> %b) {
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sabd_2s:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; ZVABD-NEXT: vabd.vv v8, v8, v9
+; ZVABD-NEXT: ret
%a.sext = sext <2 x i32> %a to <2 x i64>
%b.sext = sext <2 x i32> %b to <2 x i64>
%sub = sub <2 x i64> %a.sext, %b.sext
@@ -154,6 +208,14 @@ define <2 x i32> @sabd_2s_promoted_ops(<2 x i16> %a, <2 x i16> %b) {
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vzext.vf2 v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sabd_2s_promoted_ops:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; ZVABD-NEXT: vabd.vv v9, v8, v9
+; ZVABD-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVABD-NEXT: vzext.vf2 v8, v9
+; ZVABD-NEXT: ret
%a.sext = sext <2 x i16> %a to <2 x i32>
%b.sext = sext <2 x i16> %b to <2 x i32>
%sub = sub <2 x i32> %a.sext, %b.sext
@@ -170,6 +232,12 @@ define <4 x i32> @sabd_4s(<4 x i32> %a, <4 x i32> %b) {
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sabd_4s:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; ZVABD-NEXT: vabd.vv v8, v8, v9
+; ZVABD-NEXT: ret
%a.sext = sext <4 x i32> %a to <4 x i64>
%b.sext = sext <4 x i32> %b to <4 x i64>
%sub = sub <4 x i64> %a.sext, %b.sext
@@ -189,6 +257,14 @@ define <4 x i32> @sabd_4s_promoted_ops(<4 x i16> %a, <4 x i16> %b) {
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT: vzext.vf2 v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sabd_4s_promoted_ops:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; ZVABD-NEXT: vabd.vv v9, v8, v9
+; ZVABD-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVABD-NEXT: vzext.vf2 v8, v9
+; ZVABD-NEXT: ret
%a.sext = sext <4 x i16> %a to <4 x i32>
%b.sext = sext <4 x i16> %b to <4 x i32>
%sub = sub <4 x i32> %a.sext, %b.sext
@@ -204,6 +280,12 @@ define <2 x i64> @sabd_2d(<2 x i64> %a, <2 x i64> %b) {
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sabd_2d:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; ZVABD-NEXT: vabd.vv v8, v8, v9
+; ZVABD-NEXT: ret
%a.sext = sext <2 x i64> %a to <2 x i128>
%b.sext = sext <2 x i64> %b to <2 x i128>
%sub = sub <2 x i128> %a.sext, %b.sext
@@ -223,6 +305,14 @@ define <2 x i64> @sabd_2d_promoted_ops(<2 x i32> %a, <2 x i32> %b) {
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT: vzext.vf2 v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sabd_2d_promoted_ops:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; ZVABD-NEXT: vabd.vv v9, v8, v9
+; ZVABD-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; ZVABD-NEXT: vzext.vf2 v8, v9
+; ZVABD-NEXT: ret
%a.sext = sext <2 x i32> %a to <2 x i64>
%b.sext = sext <2 x i32> %b to <2 x i64>
%sub = sub <2 x i64> %a.sext, %b.sext
@@ -243,6 +333,12 @@ define <8 x i8> @uabd_8b(<8 x i8> %a, <8 x i8> %b) {
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: uabd_8b:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; ZVABD-NEXT: vabdu.vv v8, v8, v9
+; ZVABD-NEXT: ret
%a.zext = zext <8 x i8> %a to <8 x i16>
%b.zext = zext <8 x i8> %b to <8 x i16>
%sub = sub <8 x i16> %a.zext, %b.zext
@@ -260,6 +356,12 @@ define <16 x i8> @uabd_16b(<16 x i8> %a, <16 x i8> %b) {
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: uabd_16b:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT: vabdu.vv v8, v8, v9
+; ZVABD-NEXT: ret
%a.zext = zext <16 x i8> %a to <16 x i16>
%b.zext = zext <16 x i8> %b to <16 x i16>
%sub = sub <16 x i16> %a.zext, %b.zext
@@ -277,6 +379,12 @@ define <4 x i16> @uabd_4h(<4 x i16> %a, <4 x i16> %b) {
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: uabd_4h:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; ZVABD-NEXT: vabdu.vv v8, v8, v9
+; ZVABD-NEXT: ret
%a.zext = zext <4 x i16> %a to <4 x i32>
%b.zext = zext <4 x i16> %b to <4 x i32>
%sub = sub <4 x i32> %a.zext, %b.zext
@@ -296,6 +404,14 @@ define <4 x i16> @uabd_4h_promoted_ops(<4 x i8> %a, <4 x i8> %b) {
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vzext.vf2 v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: uabd_4h_promoted_ops:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; ZVABD-NEXT: vabdu.vv v9, v8, v9
+; ZVABD-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVABD-NEXT: vzext.vf2 v8, v9
+; ZVABD-NEXT: ret
%a.zext = zext <4 x i8> %a to <4 x i16>
%b.zext = zext <4 x i8> %b to <4 x i16>
%sub = sub <4 x i16> %a.zext, %b.zext
@@ -312,6 +428,12 @@ define <8 x i16> @uabd_8h(<8 x i16> %a, <8 x i16> %b) {
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: uabd_8h:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZVABD-NEXT: vabdu.vv v8, v8, v9
+; ZVABD-NEXT: ret
%a.zext = zext <8 x i16> %a to <8 x i32>
%b.zext = zext <8 x i16> %b to <8 x i32>
%sub = sub <8 x i32> %a.zext, %b.zext
@@ -331,6 +453,14 @@ define <8 x i16> @uabd_8h_promoted_ops(<8 x i8> %a, <8 x i8> %b) {
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT: vzext.vf2 v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: uabd_8h_promoted_ops:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; ZVABD-NEXT: vabdu.vv v9, v8, v9
+; ZVABD-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVABD-NEXT: vzext.vf2 v8, v9
+; ZVABD-NEXT: ret
%a.zext = zext <8 x i8> %a to <8 x i16>
%b.zext = zext <8 x i8> %b to <8 x i16>
%sub = sub <8 x i16> %a.zext, %b.zext
@@ -347,6 +477,12 @@ define <2 x i32> @uabd_2s(<2 x i32> %a, <2 x i32> %b) {
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: uabd_2s:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; ZVABD-NEXT: vabdu.vv v8, v8, v9
+; ZVABD-NEXT: ret
%a.zext = zext <2 x i32> %a to <2 x i64>
%b.zext = zext <2 x i32> %b to <2 x i64>
%sub = sub <2 x i64> %a.zext, %b.zext
@@ -366,6 +502,14 @@ define <2 x i32> @uabd_2s_promoted_ops(<2 x i16> %a, <2 x i16> %b) {
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vzext.vf2 v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: uabd_2s_promoted_ops:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; ZVABD-NEXT: vabdu.vv v9, v8, v9
+; ZVABD-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVABD-NEXT: vzext.vf2 v8, v9
+; ZVABD-NEXT: ret
%a.zext = zext <2 x i16> %a to <2 x i32>
%b.zext = zext <2 x i16> %b to <2 x i32>
%sub = sub <2 x i32> %a.zext, %b.zext
@@ -382,6 +526,12 @@ define <4 x i32> @uabd_4s(<4 x i32> %a, <4 x i32> %b) {
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: uabd_4s:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; ZVABD-NEXT: vabdu.vv v8, v8, v9
+; ZVABD-NEXT: ret
%a.zext = zext <4 x i32> %a to <4 x i64>
%b.zext = zext <4 x i32> %b to <4 x i64>
%sub = sub <4 x i64> %a.zext, %b.zext
@@ -401,6 +551,14 @@ define <4 x i32> @uabd_4s_promoted_ops(<4 x i16> %a, <4 x i16> %b) {
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT: vzext.vf2 v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: uabd_4s_promoted_ops:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; ZVABD-NEXT: vabdu.vv v9, v8, v9
+; ZVABD-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVABD-NEXT: vzext.vf2 v8, v9
+; ZVABD-NEXT: ret
%a.zext = zext <4 x i16> %a to <4 x i32>
%b.zext = zext <4 x i16> %b to <4 x i32>
%sub = sub <4 x i32> %a.zext, %b.zext
@@ -416,6 +574,12 @@ define <2 x i64> @uabd_2d(<2 x i64> %a, <2 x i64> %b) {
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: uabd_2d:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; ZVABD-NEXT: vabdu.vv v8, v8, v9
+; ZVABD-NEXT: ret
%a.zext = zext <2 x i64> %a to <2 x i128>
%b.zext = zext <2 x i64> %b to <2 x i128>
%sub = sub <2 x i128> %a.zext, %b.zext
@@ -435,6 +599,14 @@ define <2 x i64> @uabd_2d_promoted_ops(<2 x i32> %a, <2 x i32> %b) {
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT: vzext.vf2 v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: uabd_2d_promoted_ops:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; ZVABD-NEXT: vabdu.vv v9, v8, v9
+; ZVABD-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; ZVABD-NEXT: vzext.vf2 v8, v9
+; ZVABD-NEXT: ret
%a.zext = zext <2 x i32> %a to <2 x i64>
%b.zext = zext <2 x i32> %b to <2 x i64>
%sub = sub <2 x i64> %a.zext, %b.zext
@@ -451,6 +623,14 @@ define <16 x i8> @uabd_v16i8_nuw(<16 x i8> %a, <16 x i8> %b) {
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: uabd_v16i8_nuw:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT: vsub.vv v8, v8, v9
+; ZVABD-NEXT: vrsub.vi v9, v8, 0
+; ZVABD-NEXT: vmax.vv v8, v8, v9
+; ZVABD-NEXT: ret
%sub = sub nuw <16 x i8> %a, %b
%abs = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %sub, i1 true)
ret <16 x i8> %abs
@@ -465,6 +645,14 @@ define <8 x i16> @uabd_v8i16_nuw(<8 x i16> %a, <8 x i16> %b) {
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: uabd_v8i16_nuw:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZVABD-NEXT: vsub.vv v8, v8, v9
+; ZVABD-NEXT: vrsub.vi v9, v8, 0
+; ZVABD-NEXT: vmax.vv v8, v8, v9
+; ZVABD-NEXT: ret
%sub = sub nuw <8 x i16> %a, %b
%abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
ret <8 x i16> %abs
@@ -479,6 +667,14 @@ define <4 x i32> @uabd_v4i32_nuw(<4 x i32> %a, <4 x i32> %b) {
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: uabd_v4i32_nuw:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; ZVABD-NEXT: vsub.vv v8, v8, v9
+; ZVABD-NEXT: vrsub.vi v9, v8, 0
+; ZVABD-NEXT: vmax.vv v8, v8, v9
+; ZVABD-NEXT: ret
%sub = sub nuw <4 x i32> %a, %b
%abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
ret <4 x i32> %abs
@@ -493,6 +689,14 @@ define <2 x i64> @uabd_v2i64_nuw(<2 x i64> %a, <2 x i64> %b) {
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: uabd_v2i64_nuw:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; ZVABD-NEXT: vsub.vv v8, v8, v9
+; ZVABD-NEXT: vrsub.vi v9, v8, 0
+; ZVABD-NEXT: vmax.vv v8, v8, v9
+; ZVABD-NEXT: ret
%sub = sub nuw <2 x i64> %a, %b
%abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
ret <2 x i64> %abs
@@ -507,6 +711,12 @@ define <16 x i8> @sabd_v16i8_nsw(<16 x i8> %a, <16 x i8> %b) {
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sabd_v16i8_nsw:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT: vabd.vv v8, v8, v9
+; ZVABD-NEXT: ret
%sub = sub nsw <16 x i8> %a, %b
%abs = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %sub, i1 true)
ret <16 x i8> %abs
@@ -521,6 +731,12 @@ define <8 x i16> @sabd_v8i16_nsw(<8 x i16> %a, <8 x i16> %b) {
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sabd_v8i16_nsw:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZVABD-NEXT: vabd.vv v8, v8, v9
+; ZVABD-NEXT: ret
%sub = sub nsw <8 x i16> %a, %b
%abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
ret <8 x i16> %abs
@@ -535,6 +751,12 @@ define <4 x i32> @sabd_v4i32_nsw(<4 x i32> %a, <4 x i32> %b) {
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sabd_v4i32_nsw:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; ZVABD-NEXT: vabd.vv v8, v8, v9
+; ZVABD-NEXT: ret
%sub = sub nsw <4 x i32> %a, %b
%abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
ret <4 x i32> %abs
@@ -549,6 +771,12 @@ define <2 x i64> @sabd_v2i64_nsw(<2 x i64> %a, <2 x i64> %b) {
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sabd_v2i64_nsw:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; ZVABD-NEXT: vabd.vv v8, v8, v9
+; ZVABD-NEXT: ret
%sub = sub nsw <2 x i64> %a, %b
%abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
ret <2 x i64> %abs
@@ -563,6 +791,12 @@ define <16 x i8> @smaxmin_v16i8(<16 x i8> %0, <16 x i8> %1) {
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: smaxmin_v16i8:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT: vabd.vv v8, v8, v9
+; ZVABD-NEXT: ret
%a = tail call <16 x i8> @llvm.smax.v16i8(<16 x i8> %0, <16 x i8> %1)
%b = tail call <16 x i8> @llvm.smin.v16i8(<16 x i8> %0, <16 x i8> %1)
%sub = sub <16 x i8> %a, %b
@@ -578,6 +812,12 @@ define <8 x i16> @smaxmin_v8i16(<8 x i16> %0, <8 x i16> %1) {
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: smaxmin_v8i16:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZVABD-NEXT: vabd.vv v8, v8, v9
+; ZVABD-NEXT: ret
%a = tail call <8 x i16> @llvm.smax.v8i16(<8 x i16> %0, <8 x i16> %1)
%b = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> %0, <8 x i16> %1)
%sub = sub <8 x i16> %a, %b
@@ -593,6 +833,12 @@ define <4 x i32> @smaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: smaxmin_v4i32:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; ZVABD-NEXT: vabd.vv v8, v8, v9
+; ZVABD-NEXT: ret
%a = tail call <4 x i32> @llvm.smax.v4i32(<4 x i32> %0, <4 x i32> %1)
%b = tail call <4 x i32> @llvm.smin.v4i32(<4 x i32> %0, <4 x i32> %1)
%sub = sub <4 x i32> %a, %b
@@ -608,6 +854,12 @@ define <2 x i64> @smaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: smaxmin_v2i64:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; ZVABD-NEXT: vabd.vv v8, v8, v9
+; ZVABD-NEXT: ret
%a = tail call <2 x i64> @llvm.smax.v2i64(<2 x i64> %0, <2 x i64> %1)
%b = tail call <2 x i64> @llvm.smin.v2i64(<2 x i64> %0, <2 x i64> %1)
%sub = sub <2 x i64> %a, %b
@@ -623,6 +875,12 @@ define <16 x i8> @umaxmin_v16i8(<16 x i8> %0, <16 x i8> %1) {
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: umaxmin_v16i8:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT: vabdu.vv v8, v8, v9
+; ZVABD-NEXT: ret
%a = tail call <16 x i8> @llvm.umax.v16i8(<16 x i8> %0, <16 x i8> %1)
%b = tail call <16 x i8> @llvm.umin.v16i8(<16 x i8> %0, <16 x i8> %1)
%sub = sub <16 x i8> %a, %b
@@ -638,6 +896,12 @@ define <8 x i16> @umaxmin_v8i16(<8 x i16> %0, <8 x i16> %1) {
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: umaxmin_v8i16:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZVABD-NEXT: vabdu.vv v8, v8, v9
+; ZVABD-NEXT: ret
%a = tail call <8 x i16> @llvm.umax.v8i16(<8 x i16> %0, <8 x i16> %1)
%b = tail call <8 x i16> @llvm.umin.v8i16(<8 x i16> %0, <8 x i16> %1)
%sub = sub <8 x i16> %a, %b
@@ -653,6 +917,12 @@ define <4 x i32> @umaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: umaxmin_v4i32:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; ZVABD-NEXT: vabdu.vv v8, v8, v9
+; ZVABD-NEXT: ret
%a = tail call <4 x i32> @llvm.umax.v4i32(<4 x i32> %0, <4 x i32> %1)
%b = tail call <4 x i32> @llvm.umin.v4i32(<4 x i32> %0, <4 x i32> %1)
%sub = sub <4 x i32> %a, %b
@@ -668,6 +938,12 @@ define <2 x i64> @umaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: umaxmin_v2i64:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; ZVABD-NEXT: vabdu.vv v8, v8, v9
+; ZVABD-NEXT: ret
%a = tail call <2 x i64> @llvm.umax.v2i64(<2 x i64> %0, <2 x i64> %1)
%b = tail call <2 x i64> @llvm.umin.v2i64(<2 x i64> %0, <2 x i64> %1)
%sub = sub <2 x i64> %a, %b
@@ -683,6 +959,12 @@ define <16 x i8> @umaxmin_v16i8_com1(<16 x i8> %0, <16 x i8> %1) {
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: umaxmin_v16i8_com1:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT: vabdu.vv v8, v8, v9
+; ZVABD-NEXT: ret
%a = tail call <16 x i8> @llvm.umax.v16i8(<16 x i8> %0, <16 x i8> %1)
%b = tail call <16 x i8> @llvm.umin.v16i8(<16 x i8> %1, <16 x i8> %0)
%sub = sub <16 x i8> %a, %b
@@ -725,3 +1007,5 @@ declare <2 x i64> @llvm.umin.v2i64(<2 x i64>, <2 x i64>)
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; RV32: {{.*}}
; RV64: {{.*}}
+; ZVABD-RV32: {{.*}}
+; ZVABD-RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
index 8da605d35270dee..62ec0543949a0dc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc < %s -mtriple=riscv32 -mattr=+v | FileCheck %s
; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+experimental-zvabd | FileCheck %s --check-prefix=ZVABD
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+experimental-zvabd | FileCheck %s --check-prefix=ZVABD
define signext i16 @sad_4x8_as_i16(<4 x i8> %a, <4 x i8> %b) {
; CHECK-LABEL: sad_4x8_as_i16:
@@ -16,6 +18,18 @@ define signext i16 @sad_4x8_as_i16(<4 x i8> %a, <4 x i8> %b) {
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sad_4x8_as_i16:
+; ZVABD: # %bb.0: # %entry
+; ZVABD-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; ZVABD-NEXT: vabdu.vv v8, v8, v9
+; ZVABD-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVABD-NEXT: vmv.s.x v9, zero
+; ZVABD-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; ZVABD-NEXT: vwredsumu.vs v8, v8, v9
+; ZVABD-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVABD-NEXT: vmv.x.s a0, v8
+; ZVABD-NEXT: ret
entry:
%1 = zext <4 x i8> %a to <4 x i16>
%3 = zext <4 x i8> %b to <4 x i16>
@@ -38,6 +52,17 @@ define signext i32 @sad_4x8_as_i32(<4 x i8> %a, <4 x i8> %b) {
; CHECK-NEXT: vredsum.vs v8, v9, v8
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sad_4x8_as_i32:
+; ZVABD: # %bb.0: # %entry
+; ZVABD-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; ZVABD-NEXT: vabdu.vv v8, v8, v9
+; ZVABD-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVABD-NEXT: vzext.vf4 v9, v8
+; ZVABD-NEXT: vmv.s.x v8, zero
+; ZVABD-NEXT: vredsum.vs v8, v9, v8
+; ZVABD-NEXT: vmv.x.s a0, v8
+; ZVABD-NEXT: ret
entry:
%1 = zext <4 x i8> %a to <4 x i32>
%3 = zext <4 x i8> %b to <4 x i32>
@@ -61,6 +86,18 @@ define signext i16 @sad_16x8_as_i16(<16 x i8> %a, <16 x i8> %b) {
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sad_16x8_as_i16:
+; ZVABD: # %bb.0: # %entry
+; ZVABD-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT: vabdu.vv v8, v8, v9
+; ZVABD-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT: vmv.s.x v9, zero
+; ZVABD-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; ZVABD-NEXT: vwredsumu.vs v8, v8, v9
+; ZVABD-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT: vmv.x.s a0, v8
+; ZVABD-NEXT: ret
entry:
%1 = zext <16 x i8> %a to <16 x i16>
%3 = zext <16 x i8> %b to <16 x i16>
@@ -83,6 +120,17 @@ define signext i32 @sad_16x8_as_i32(<16 x i8> %a, <16 x i8> %b) {
; CHECK-NEXT: vredsum.vs v8, v12, v8
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sad_16x8_as_i32:
+; ZVABD: # %bb.0: # %entry
+; ZVABD-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT: vabdu.vv v8, v8, v9
+; ZVABD-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVABD-NEXT: vzext.vf4 v12, v8
+; ZVABD-NEXT: vmv.s.x v8, zero
+; ZVABD-NEXT: vredsum.vs v8, v12, v8
+; ZVABD-NEXT: vmv.x.s a0, v8
+; ZVABD-NEXT: ret
entry:
%1 = zext <16 x i8> %a to <16 x i32>
%3 = zext <16 x i8> %b to <16 x i32>
@@ -135,6 +183,41 @@ define signext i32 @sad_2block_16xi8_as_i32(ptr %a, ptr %b, i32 signext %stridea
; CHECK-NEXT: vredsum.vs v8, v20, v8
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sad_2block_16xi8_as_i32:
+; ZVABD: # %bb.0: # %entry
+; ZVABD-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT: vle8.v v8, (a0)
+; ZVABD-NEXT: vle8.v v9, (a1)
+; ZVABD-NEXT: add a0, a0, a2
+; ZVABD-NEXT: add a1, a1, a3
+; ZVABD-NEXT: vle8.v v10, (a0)
+; ZVABD-NEXT: vle8.v v11, (a1)
+; ZVABD-NEXT: add a0, a0, a2
+; ZVABD-NEXT: add a1, a1, a3
+; ZVABD-NEXT: vle8.v v12, (a0)
+; ZVABD-NEXT: vle8.v v13, (a1)
+; ZVABD-NEXT: add a0, a0, a2
+; ZVABD-NEXT: add a1, a1, a3
+; ZVABD-NEXT: vabdu.vv v8, v8, v9
+; ZVABD-NEXT: vle8.v v9, (a0)
+; ZVABD-NEXT: vabdu.vv v10, v10, v11
+; ZVABD-NEXT: vle8.v v11, (a1)
+; ZVABD-NEXT: vwaddu.vv v14, v10, v8
+; ZVABD-NEXT: vabdu.vv v8, v12, v13
+; ZVABD-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT: vzext.vf2 v12, v8
+; ZVABD-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; ZVABD-NEXT: vabdu.vv v8, v9, v11
+; ZVABD-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT: vwaddu.vv v16, v12, v14
+; ZVABD-NEXT: vzext.vf2 v10, v8
+; ZVABD-NEXT: vwaddu.wv v16, v16, v10
+; ZVABD-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVABD-NEXT: vmv.s.x v8, zero
+; ZVABD-NEXT: vredsum.vs v8, v16, v8
+; ZVABD-NEXT: vmv.x.s a0, v8
+; ZVABD-NEXT: ret
entry:
%idx.ext8 = sext i32 %strideb to i64
%idx.ext = sext i32 %stridea to i64
From 6fda7d64ce414c72096df1aaafb7152c6598f692 Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Wed, 5 Feb 2025 19:17:16 +0800
Subject: [PATCH 3/4] [RISCV][CodeGen] Lower abs to Zvabd instructions
For the abs operation, we can synthesize it via vabd.vx with the x0
register, since abs(x) = |x - 0|.
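A minimal scalar reference sketch of the assumed element-wise semantics
(illustrative only; the names and 32-bit widths here are for exposition
and are not part of the patch):

#include <cstdint>
#include <cstdio>

// Signed absolute difference, evaluated in a wider type so that
// INT32_MIN is handled the same way as the sext-to-i128 reference
// used by the abd.ll tests.
static int64_t abds32(int32_t a, int32_t b) {
  return a > b ? (int64_t)a - b : (int64_t)b - a;
}

// Unsigned absolute difference: max(a, b) - min(a, b), matching the
// umaxmin_* tests.
static uint32_t abdu32(uint32_t a, uint32_t b) {
  return a > b ? a - b : b - a;
}

int main() {
  int32_t x = -7;
  // abds(x, 0) == abs(x), which is why a single vabd.vx against x0
  // can replace the vrsub.vi + vmax.vv expansion.
  printf("abds(%d, 0) = %lld\n", x, (long long)abds32(x, 0));
  printf("abdu(3, 10) = %u\n", abdu32(3, 10));
  return 0;
}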
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 21 +-
llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td | 7 +
llvm/test/CodeGen/RISCV/rvv/abd.ll | 15 +-
llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll | 94 ++++++
llvm/test/CodeGen/RISCV/rvv/abs-vp.ll | 319 ++++++++++++++++++
.../CodeGen/RISCV/rvv/fixed-vectors-abd.ll | 12 +-
.../CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll | 247 ++++++++++++++
.../CodeGen/RISCV/rvv/fixed-vectors-abs.ll | 107 ++++++
8 files changed, 804 insertions(+), 18 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index d273e3c805bd970..7fe2d4aa87566f9 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -834,7 +834,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
Legal);
if (Subtarget.hasStdExtZvabd())
- setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Legal);
+ setOperationAction({ISD::ABDS, ISD::ABDU, ISD::ABS}, VT, Legal);
else
setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Custom);
@@ -11834,17 +11834,22 @@ SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
} else
std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
+ SDValue Result;
SDValue SplatZero = DAG.getNode(
RISCVISD::VMV_V_X_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
DAG.getConstant(0, DL, Subtarget.getXLenVT()), VL);
- SDValue NegX = DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X,
- DAG.getUNDEF(ContainerVT), Mask, VL);
- SDValue Max = DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX,
- DAG.getUNDEF(ContainerVT), Mask, VL);
-
+ if (Subtarget.hasStdExtZvabd()) {
+ Result = DAG.getNode(RISCVISD::ABDS_VL, DL, ContainerVT, X, SplatZero,
+ DAG.getUNDEF(ContainerVT), Mask, VL);
+ } else {
+ SDValue NegX = DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X,
+ DAG.getUNDEF(ContainerVT), Mask, VL);
+ Result = DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX,
+ DAG.getUNDEF(ContainerVT), Mask, VL);
+ }
if (VT.isFixedLengthVector())
- Max = convertFromScalableVector(VT, Max, DAG, Subtarget);
- return Max;
+ Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
+ return Result;
}
SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
index d8a9d7f952e1d33..8fcc4a4f7b963ed 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
@@ -47,6 +47,13 @@ let Predicates = [HasStdExtZvabd] in {
defm : VPatBinarySDNode_VV_VX<abds, "PseudoVABD">;
defm : VPatBinarySDNode_VV_VX<abdu, "PseudoVABDU">;
+foreach vti = AllIntegerVectors in {
+ def : Pat<(vti.Vector (abs (vti.Vector vti.RegClass:$rs2))),
+ (!cast<Instruction>("PseudoVABD_VX_"#vti.LMul.MX)
+ (vti.Vector (IMPLICIT_DEF)),
+ vti.RegClass:$rs2, (XLenVT X0), vti.AVL, vti.Log2SEW, TA_MA)>;
+}
+
defm : VPatBinaryVL_VV_VX<riscv_abds_vl, "PseudoVABD">;
defm : VPatBinaryVL_VV_VX<riscv_abdu_vl, "PseudoVABDU">;
} // Predicates = [HasStdExtZvabd]
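(For reference, the foreach above instantiates, for each integer vector
type, a pattern that is in effect

  abs(v)  ==>  PseudoVABD_VX_<LMUL> (IMPLICIT_DEF), v, X0, AVL, SEW, TA_MA

i.e. a vabd.vx against x0, which the assembler prints through the vabs.v
alias in the test updates below. This is a sketch of the mapping; the
operand order follows the pattern as written.)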
diff --git a/llvm/test/CodeGen/RISCV/rvv/abd.ll b/llvm/test/CodeGen/RISCV/rvv/abd.ll
index 0008e555e7cff8f..0423e6e9db6cb9f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abd.ll
@@ -155,6 +155,12 @@ define <vscale x 2 x i64> @sabd_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
; CHECK-NEXT: vmax.vv v8, v8, v10
; CHECK-NEXT: vsub.vv v8, v8, v12
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: sabd_d:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; ZVABD-NEXT: vabd.vv v8, v8, v10
+; ZVABD-NEXT: ret
%a.sext = sext <vscale x 2 x i64> %a to <vscale x 2 x i128>
%b.sext = sext <vscale x 2 x i64> %b to <vscale x 2 x i128>
%sub = sub <vscale x 2 x i128> %a.sext, %b.sext
@@ -339,6 +345,12 @@ define <vscale x 2 x i64> @uabd_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
; CHECK-NEXT: vmaxu.vv v8, v8, v10
; CHECK-NEXT: vsub.vv v8, v8, v12
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: uabd_d:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; ZVABD-NEXT: vabdu.vv v8, v8, v10
+; ZVABD-NEXT: ret
%a.zext = zext <vscale x 2 x i64> %a to <vscale x 2 x i128>
%b.zext = zext <vscale x 2 x i64> %b to <vscale x 2 x i128>
%sub = sub <vscale x 2 x i128> %a.zext, %b.zext
@@ -450,8 +462,7 @@ define <vscale x 4 x i32> @uabd_non_matching_promotion(<vscale x 4 x i8> %a, <vs
; ZVABD-NEXT: vsext.vf2 v8, v9
; ZVABD-NEXT: vwsub.wv v10, v10, v8
; ZVABD-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; ZVABD-NEXT: vrsub.vi v8, v10, 0
-; ZVABD-NEXT: vmax.vv v8, v10, v8
+; ZVABD-NEXT: vabs.v v8, v10
; ZVABD-NEXT: ret
%a.zext = zext <vscale x 4 x i8> %a to <vscale x 4 x i32>
%b.zext = sext <vscale x 4 x i8> %b to <vscale x 4 x i32>
diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll
index 589b9994651d24a..c02d07668ff60d3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll
@@ -1,6 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=ZVABD
+; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=ZVABD
declare <vscale x 1 x i16> @llvm.abs.nxv1i16(<vscale x 1 x i16>, i1)
@@ -11,6 +15,12 @@ define <vscale x 1 x i16> @vabs_nxv1i16(<vscale x 1 x i16> %v) {
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vabs_nxv1i16:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%r = call <vscale x 1 x i16> @llvm.abs.nxv1i16(<vscale x 1 x i16> %v, i1 false)
ret <vscale x 1 x i16> %r
}
@@ -24,6 +34,12 @@ define <vscale x 2 x i16> @vabs_nxv2i16(<vscale x 2 x i16> %v) {
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vabs_nxv2i16:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%r = call <vscale x 2 x i16> @llvm.abs.nxv2i16(<vscale x 2 x i16> %v, i1 false)
ret <vscale x 2 x i16> %r
}
@@ -37,6 +53,12 @@ define <vscale x 4 x i16> @vabs_nxv4i16(<vscale x 4 x i16> %v) {
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vabs_nxv4i16:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%r = call <vscale x 4 x i16> @llvm.abs.nxv4i16(<vscale x 4 x i16> %v, i1 false)
ret <vscale x 4 x i16> %r
}
@@ -50,6 +72,12 @@ define <vscale x 8 x i16> @vabs_nxv8i16(<vscale x 8 x i16> %v) {
; CHECK-NEXT: vrsub.vi v10, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vabs_nxv8i16:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%r = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %v, i1 false)
ret <vscale x 8 x i16> %r
}
@@ -63,6 +91,12 @@ define <vscale x 16 x i16> @vabs_nxv16i16(<vscale x 16 x i16> %v) {
; CHECK-NEXT: vrsub.vi v12, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v12
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vabs_nxv16i16:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%r = call <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16> %v, i1 false)
ret <vscale x 16 x i16> %r
}
@@ -76,6 +110,12 @@ define <vscale x 32 x i16> @vabs_nxv32i16(<vscale x 32 x i16> %v) {
; CHECK-NEXT: vrsub.vi v16, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v16
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vabs_nxv32i16:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%r = call <vscale x 32 x i16> @llvm.abs.nxv32i16(<vscale x 32 x i16> %v, i1 false)
ret <vscale x 32 x i16> %r
}
@@ -89,6 +129,12 @@ define <vscale x 1 x i32> @vabs_nxv1i32(<vscale x 1 x i32> %v) {
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vabs_nxv1i32:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%r = call <vscale x 1 x i32> @llvm.abs.nxv1i32(<vscale x 1 x i32> %v, i1 false)
ret <vscale x 1 x i32> %r
}
@@ -102,6 +148,12 @@ define <vscale x 2 x i32> @vabs_nxv2i32(<vscale x 2 x i32> %v) {
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vabs_nxv2i32:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%r = call <vscale x 2 x i32> @llvm.abs.nxv2i32(<vscale x 2 x i32> %v, i1 false)
ret <vscale x 2 x i32> %r
}
@@ -115,6 +167,12 @@ define <vscale x 4 x i32> @vabs_nxv4i32(<vscale x 4 x i32> %v) {
; CHECK-NEXT: vrsub.vi v10, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vabs_nxv4i32:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%r = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %v, i1 false)
ret <vscale x 4 x i32> %r
}
@@ -128,6 +186,12 @@ define <vscale x 8 x i32> @vabs_nxv8i32(<vscale x 8 x i32> %v) {
; CHECK-NEXT: vrsub.vi v12, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v12
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vabs_nxv8i32:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%r = call <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32> %v, i1 false)
ret <vscale x 8 x i32> %r
}
@@ -141,6 +205,12 @@ define <vscale x 16 x i32> @vabs_nxv16i32(<vscale x 16 x i32> %v) {
; CHECK-NEXT: vrsub.vi v16, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v16
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vabs_nxv16i32:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e32, m8, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%r = call <vscale x 16 x i32> @llvm.abs.nxv16i32(<vscale x 16 x i32> %v, i1 false)
ret <vscale x 16 x i32> %r
}
@@ -154,6 +224,12 @@ define <vscale x 1 x i64> @vabs_nxv1i64(<vscale x 1 x i64> %v) {
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vabs_nxv1i64:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%r = call <vscale x 1 x i64> @llvm.abs.nxv1i64(<vscale x 1 x i64> %v, i1 false)
ret <vscale x 1 x i64> %r
}
@@ -167,6 +243,12 @@ define <vscale x 2 x i64> @vabs_nxv2i64(<vscale x 2 x i64> %v) {
; CHECK-NEXT: vrsub.vi v10, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vabs_nxv2i64:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%r = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %v, i1 false)
ret <vscale x 2 x i64> %r
}
@@ -180,6 +262,12 @@ define <vscale x 4 x i64> @vabs_nxv4i64(<vscale x 4 x i64> %v) {
; CHECK-NEXT: vrsub.vi v12, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v12
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vabs_nxv4i64:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%r = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %v, i1 false)
ret <vscale x 4 x i64> %r
}
@@ -193,6 +281,12 @@ define <vscale x 8 x i64> @vabs_nxv8i64(<vscale x 8 x i64> %v) {
; CHECK-NEXT: vrsub.vi v16, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v16
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vabs_nxv8i64:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%r = call <vscale x 8 x i64> @llvm.abs.nxv8i64(<vscale x 8 x i64> %v, i1 false)
ret <vscale x 8 x i64> %r
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
index ee0016ec080e247..fc17fbdd53e8d0b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
@@ -3,6 +3,10 @@
; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \
; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv32 -mattr=+v,+m,+experimental-zvabd -target-abi=ilp32d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD
+; RUN: llc -mtriple=riscv64 -mattr=+v,+m,+experimental-zvabd -target-abi=lp64d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD
declare <vscale x 1 x i8> @llvm.vp.abs.nxv1i8(<vscale x 1 x i8>, i1 immarg, <vscale x 1 x i1>, i32)
@@ -13,6 +17,12 @@ define <vscale x 1 x i8> @vp_abs_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1>
; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i8:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <vscale x 1 x i8> @llvm.vp.abs.nxv1i8(<vscale x 1 x i8> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i8> %v
}
@@ -24,6 +34,12 @@ define <vscale x 1 x i8> @vp_abs_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zero
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i8_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <vscale x 1 x i8> @llvm.vp.abs.nxv1i8(<vscale x 1 x i8> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
ret <vscale x 1 x i8> %v
}
@@ -37,6 +53,12 @@ define <vscale x 2 x i8> @vp_abs_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1>
; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i8:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.abs.nxv2i8(<vscale x 2 x i8> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x i8> %v
}
@@ -48,6 +70,12 @@ define <vscale x 2 x i8> @vp_abs_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zero
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i8_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.abs.nxv2i8(<vscale x 2 x i8> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i8> %v
}
@@ -61,6 +89,12 @@ define <vscale x 4 x i8> @vp_abs_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1>
; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i8:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.vp.abs.nxv4i8(<vscale x 4 x i8> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
ret <vscale x 4 x i8> %v
}
@@ -72,6 +106,12 @@ define <vscale x 4 x i8> @vp_abs_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zero
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i8_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.vp.abs.nxv4i8(<vscale x 4 x i8> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
ret <vscale x 4 x i8> %v
}
@@ -85,6 +125,12 @@ define <vscale x 8 x i8> @vp_abs_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1>
; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i8:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <vscale x 8 x i8> @llvm.vp.abs.nxv8i8(<vscale x 8 x i8> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i8> %v
}
@@ -96,6 +142,12 @@ define <vscale x 8 x i8> @vp_abs_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zero
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i8_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <vscale x 8 x i8> @llvm.vp.abs.nxv8i8(<vscale x 8 x i8> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
ret <vscale x 8 x i8> %v
}
@@ -109,6 +161,12 @@ define <vscale x 16 x i8> @vp_abs_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x
; CHECK-NEXT: vrsub.vi v10, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i8:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.vp.abs.nxv16i8(<vscale x 16 x i8> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
ret <vscale x 16 x i8> %v
}
@@ -120,6 +178,12 @@ define <vscale x 16 x i8> @vp_abs_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32 z
; CHECK-NEXT: vrsub.vi v10, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i8_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.vp.abs.nxv16i8(<vscale x 16 x i8> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
ret <vscale x 16 x i8> %v
}
@@ -133,6 +197,12 @@ define <vscale x 32 x i8> @vp_abs_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x
; CHECK-NEXT: vrsub.vi v12, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv32i8:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <vscale x 32 x i8> @llvm.vp.abs.nxv32i8(<vscale x 32 x i8> %va, i1 false, <vscale x 32 x i1> %m, i32 %evl)
ret <vscale x 32 x i8> %v
}
@@ -144,6 +214,12 @@ define <vscale x 32 x i8> @vp_abs_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32 z
; CHECK-NEXT: vrsub.vi v12, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v12
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv32i8_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <vscale x 32 x i8> @llvm.vp.abs.nxv32i8(<vscale x 32 x i8> %va, i1 false, <vscale x 32 x i1> splat (i1 true), i32 %evl)
ret <vscale x 32 x i8> %v
}
@@ -157,6 +233,12 @@ define <vscale x 64 x i8> @vp_abs_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x
; CHECK-NEXT: vrsub.vi v16, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv64i8:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <vscale x 64 x i8> @llvm.vp.abs.nxv64i8(<vscale x 64 x i8> %va, i1 false, <vscale x 64 x i1> %m, i32 %evl)
ret <vscale x 64 x i8> %v
}
@@ -168,6 +250,12 @@ define <vscale x 64 x i8> @vp_abs_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32 z
; CHECK-NEXT: vrsub.vi v16, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v16
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv64i8_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <vscale x 64 x i8> @llvm.vp.abs.nxv64i8(<vscale x 64 x i8> %va, i1 false, <vscale x 64 x i1> splat (i1 true), i32 %evl)
ret <vscale x 64 x i8> %v
}
@@ -181,6 +269,12 @@ define <vscale x 1 x i16> @vp_abs_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x
; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i16:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <vscale x 1 x i16> @llvm.vp.abs.nxv1i16(<vscale x 1 x i16> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i16> %v
}
@@ -192,6 +286,12 @@ define <vscale x 1 x i16> @vp_abs_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32 z
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i16_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <vscale x 1 x i16> @llvm.vp.abs.nxv1i16(<vscale x 1 x i16> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
ret <vscale x 1 x i16> %v
}
@@ -205,6 +305,12 @@ define <vscale x 2 x i16> @vp_abs_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x
; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i16:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.abs.nxv2i16(<vscale x 2 x i16> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x i16> %v
}
@@ -216,6 +322,12 @@ define <vscale x 2 x i16> @vp_abs_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 z
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i16_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.abs.nxv2i16(<vscale x 2 x i16> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i16> %v
}
@@ -229,6 +341,12 @@ define <vscale x 4 x i16> @vp_abs_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x
; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i16:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.vp.abs.nxv4i16(<vscale x 4 x i16> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
ret <vscale x 4 x i16> %v
}
@@ -240,6 +358,12 @@ define <vscale x 4 x i16> @vp_abs_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32 z
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i16_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.vp.abs.nxv4i16(<vscale x 4 x i16> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
ret <vscale x 4 x i16> %v
}
@@ -253,6 +377,12 @@ define <vscale x 8 x i16> @vp_abs_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x
; CHECK-NEXT: vrsub.vi v10, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i16:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <vscale x 8 x i16> @llvm.vp.abs.nxv8i16(<vscale x 8 x i16> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i16> %v
}
@@ -264,6 +394,12 @@ define <vscale x 8 x i16> @vp_abs_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32 z
; CHECK-NEXT: vrsub.vi v10, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i16_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <vscale x 8 x i16> @llvm.vp.abs.nxv8i16(<vscale x 8 x i16> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
ret <vscale x 8 x i16> %v
}
@@ -277,6 +413,12 @@ define <vscale x 16 x i16> @vp_abs_nxv16i16(<vscale x 16 x i16> %va, <vscale x 1
; CHECK-NEXT: vrsub.vi v12, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i16:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <vscale x 16 x i16> @llvm.vp.abs.nxv16i16(<vscale x 16 x i16> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
ret <vscale x 16 x i16> %v
}
@@ -288,6 +430,12 @@ define <vscale x 16 x i16> @vp_abs_nxv16i16_unmasked(<vscale x 16 x i16> %va, i3
; CHECK-NEXT: vrsub.vi v12, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v12
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i16_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <vscale x 16 x i16> @llvm.vp.abs.nxv16i16(<vscale x 16 x i16> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
ret <vscale x 16 x i16> %v
}
@@ -301,6 +449,12 @@ define <vscale x 32 x i16> @vp_abs_nxv32i16(<vscale x 32 x i16> %va, <vscale x 3
; CHECK-NEXT: vrsub.vi v16, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv32i16:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.vp.abs.nxv32i16(<vscale x 32 x i16> %va, i1 false, <vscale x 32 x i1> %m, i32 %evl)
ret <vscale x 32 x i16> %v
}
@@ -312,6 +466,12 @@ define <vscale x 32 x i16> @vp_abs_nxv32i16_unmasked(<vscale x 32 x i16> %va, i3
; CHECK-NEXT: vrsub.vi v16, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v16
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv32i16_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.vp.abs.nxv32i16(<vscale x 32 x i16> %va, i1 false, <vscale x 32 x i1> splat (i1 true), i32 %evl)
ret <vscale x 32 x i16> %v
}
@@ -325,6 +485,12 @@ define <vscale x 1 x i32> @vp_abs_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x
; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i32:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <vscale x 1 x i32> @llvm.vp.abs.nxv1i32(<vscale x 1 x i32> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i32> %v
}
@@ -336,6 +502,12 @@ define <vscale x 1 x i32> @vp_abs_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 z
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i32_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <vscale x 1 x i32> @llvm.vp.abs.nxv1i32(<vscale x 1 x i32> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
ret <vscale x 1 x i32> %v
}
@@ -349,6 +521,12 @@ define <vscale x 2 x i32> @vp_abs_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x
; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i32:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.abs.nxv2i32(<vscale x 2 x i32> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x i32> %v
}
@@ -360,6 +538,12 @@ define <vscale x 2 x i32> @vp_abs_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 z
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i32_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.abs.nxv2i32(<vscale x 2 x i32> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i32> %v
}
@@ -373,6 +557,12 @@ define <vscale x 4 x i32> @vp_abs_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x
; CHECK-NEXT: vrsub.vi v10, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i32:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.vp.abs.nxv4i32(<vscale x 4 x i32> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
ret <vscale x 4 x i32> %v
}
@@ -384,6 +574,12 @@ define <vscale x 4 x i32> @vp_abs_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 z
; CHECK-NEXT: vrsub.vi v10, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i32_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.vp.abs.nxv4i32(<vscale x 4 x i32> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
ret <vscale x 4 x i32> %v
}
@@ -397,6 +593,12 @@ define <vscale x 8 x i32> @vp_abs_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x
; CHECK-NEXT: vrsub.vi v12, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i32:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vp.abs.nxv8i32(<vscale x 8 x i32> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i32> %v
}
@@ -408,6 +610,12 @@ define <vscale x 8 x i32> @vp_abs_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 z
; CHECK-NEXT: vrsub.vi v12, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v12
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i32_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vp.abs.nxv8i32(<vscale x 8 x i32> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
ret <vscale x 8 x i32> %v
}
@@ -421,6 +629,12 @@ define <vscale x 16 x i32> @vp_abs_nxv16i32(<vscale x 16 x i32> %va, <vscale x 1
; CHECK-NEXT: vrsub.vi v16, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i32:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vp.abs.nxv16i32(<vscale x 16 x i32> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
ret <vscale x 16 x i32> %v
}
@@ -432,6 +646,12 @@ define <vscale x 16 x i32> @vp_abs_nxv16i32_unmasked(<vscale x 16 x i32> %va, i3
; CHECK-NEXT: vrsub.vi v16, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v16
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i32_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vp.abs.nxv16i32(<vscale x 16 x i32> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
ret <vscale x 16 x i32> %v
}
@@ -445,6 +665,12 @@ define <vscale x 1 x i64> @vp_abs_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x
; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i64:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.vp.abs.nxv1i64(<vscale x 1 x i64> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i64> %v
}
@@ -456,6 +682,12 @@ define <vscale x 1 x i64> @vp_abs_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32 z
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i64_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.vp.abs.nxv1i64(<vscale x 1 x i64> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
ret <vscale x 1 x i64> %v
}
@@ -469,6 +701,12 @@ define <vscale x 2 x i64> @vp_abs_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x
; CHECK-NEXT: vrsub.vi v10, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i64:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.abs.nxv2i64(<vscale x 2 x i64> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x i64> %v
}
@@ -480,6 +718,12 @@ define <vscale x 2 x i64> @vp_abs_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 z
; CHECK-NEXT: vrsub.vi v10, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i64_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.abs.nxv2i64(<vscale x 2 x i64> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i64> %v
}
@@ -493,6 +737,12 @@ define <vscale x 4 x i64> @vp_abs_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x
; CHECK-NEXT: vrsub.vi v12, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i64:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.vp.abs.nxv4i64(<vscale x 4 x i64> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
ret <vscale x 4 x i64> %v
}
@@ -504,6 +754,12 @@ define <vscale x 4 x i64> @vp_abs_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32 z
; CHECK-NEXT: vrsub.vi v12, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v12
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i64_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.vp.abs.nxv4i64(<vscale x 4 x i64> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
ret <vscale x 4 x i64> %v
}
@@ -517,6 +773,12 @@ define <vscale x 7 x i64> @vp_abs_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7 x
; CHECK-NEXT: vrsub.vi v16, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv7i64:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <vscale x 7 x i64> @llvm.vp.abs.nxv7i64(<vscale x 7 x i64> %va, i1 false, <vscale x 7 x i1> %m, i32 %evl)
ret <vscale x 7 x i64> %v
}
@@ -528,6 +790,12 @@ define <vscale x 7 x i64> @vp_abs_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32 z
; CHECK-NEXT: vrsub.vi v16, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v16
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv7i64_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <vscale x 7 x i64> @llvm.vp.abs.nxv7i64(<vscale x 7 x i64> %va, i1 false, <vscale x 7 x i1> splat (i1 true), i32 %evl)
ret <vscale x 7 x i64> %v
}
@@ -541,6 +809,12 @@ define <vscale x 8 x i64> @vp_abs_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x
; CHECK-NEXT: vrsub.vi v16, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i64:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <vscale x 8 x i64> @llvm.vp.abs.nxv8i64(<vscale x 8 x i64> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i64> %v
}
@@ -552,6 +826,12 @@ define <vscale x 8 x i64> @vp_abs_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 z
; CHECK-NEXT: vrsub.vi v16, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v16
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i64_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <vscale x 8 x i64> @llvm.vp.abs.nxv8i64(<vscale x 8 x i64> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
ret <vscale x 8 x i64> %v
}
@@ -607,6 +887,28 @@ define <vscale x 16 x i64> @vp_abs_nxv16i64(<vscale x 16 x i64> %va, <vscale x 1
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i64:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; ZVABD-NEXT: vmv1r.v v24, v0
+; ZVABD-NEXT: csrr a1, vlenb
+; ZVABD-NEXT: srli a2, a1, 3
+; ZVABD-NEXT: sub a3, a0, a1
+; ZVABD-NEXT: vslidedown.vx v0, v0, a2
+; ZVABD-NEXT: sltu a2, a0, a3
+; ZVABD-NEXT: addi a2, a2, -1
+; ZVABD-NEXT: and a2, a2, a3
+; ZVABD-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; ZVABD-NEXT: vabs.v v16, v16, v0.t
+; ZVABD-NEXT: bltu a0, a1, .LBB46_2
+; ZVABD-NEXT: # %bb.1:
+; ZVABD-NEXT: mv a0, a1
+; ZVABD-NEXT: .LBB46_2:
+; ZVABD-NEXT: vmv1r.v v0, v24
+; ZVABD-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <vscale x 16 x i64> @llvm.vp.abs.nxv16i64(<vscale x 16 x i64> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
ret <vscale x 16 x i64> %v
}
@@ -630,6 +932,23 @@ define <vscale x 16 x i64> @vp_abs_nxv16i64_unmasked(<vscale x 16 x i64> %va, i3
; CHECK-NEXT: vrsub.vi v24, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v24
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i64_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: csrr a1, vlenb
+; ZVABD-NEXT: sub a2, a0, a1
+; ZVABD-NEXT: sltu a3, a0, a2
+; ZVABD-NEXT: addi a3, a3, -1
+; ZVABD-NEXT: and a2, a3, a2
+; ZVABD-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; ZVABD-NEXT: vabs.v v16, v16
+; ZVABD-NEXT: bltu a0, a1, .LBB47_2
+; ZVABD-NEXT: # %bb.1:
+; ZVABD-NEXT: mv a0, a1
+; ZVABD-NEXT: .LBB47_2:
+; ZVABD-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <vscale x 16 x i64> @llvm.vp.abs.nxv16i64(<vscale x 16 x i64> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
ret <vscale x 16 x i64> %v
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
index f61c17e18b0227f..a17f16c36ec5a0f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
@@ -628,8 +628,7 @@ define <16 x i8> @uabd_v16i8_nuw(<16 x i8> %a, <16 x i8> %b) {
; ZVABD: # %bb.0:
; ZVABD-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; ZVABD-NEXT: vsub.vv v8, v8, v9
-; ZVABD-NEXT: vrsub.vi v9, v8, 0
-; ZVABD-NEXT: vmax.vv v8, v8, v9
+; ZVABD-NEXT: vabs.v v8, v8
; ZVABD-NEXT: ret
%sub = sub nuw <16 x i8> %a, %b
%abs = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %sub, i1 true)
@@ -650,8 +649,7 @@ define <8 x i16> @uabd_v8i16_nuw(<8 x i16> %a, <8 x i16> %b) {
; ZVABD: # %bb.0:
; ZVABD-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVABD-NEXT: vsub.vv v8, v8, v9
-; ZVABD-NEXT: vrsub.vi v9, v8, 0
-; ZVABD-NEXT: vmax.vv v8, v8, v9
+; ZVABD-NEXT: vabs.v v8, v8
; ZVABD-NEXT: ret
%sub = sub nuw <8 x i16> %a, %b
%abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
@@ -672,8 +670,7 @@ define <4 x i32> @uabd_v4i32_nuw(<4 x i32> %a, <4 x i32> %b) {
; ZVABD: # %bb.0:
; ZVABD-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; ZVABD-NEXT: vsub.vv v8, v8, v9
-; ZVABD-NEXT: vrsub.vi v9, v8, 0
-; ZVABD-NEXT: vmax.vv v8, v8, v9
+; ZVABD-NEXT: vabs.v v8, v8
; ZVABD-NEXT: ret
%sub = sub nuw <4 x i32> %a, %b
%abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
@@ -694,8 +691,7 @@ define <2 x i64> @uabd_v2i64_nuw(<2 x i64> %a, <2 x i64> %b) {
; ZVABD: # %bb.0:
; ZVABD-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; ZVABD-NEXT: vsub.vv v8, v8, v9
-; ZVABD-NEXT: vrsub.vi v9, v8, 0
-; ZVABD-NEXT: vmax.vv v8, v8, v9
+; ZVABD-NEXT: vabs.v v8, v8
; ZVABD-NEXT: ret
%sub = sub nuw <2 x i64> %a, %b
%abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll
index 84da351de76ba97..2228f74bd20c0b7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll
@@ -3,6 +3,10 @@
; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \
; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv32 -mattr=+v,+m,+experimental-zvabd -target-abi=ilp32d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD
+; RUN: llc -mtriple=riscv64 -mattr=+v,+m,+experimental-zvabd -target-abi=lp64d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD
declare <2 x i8> @llvm.vp.abs.v2i8(<2 x i8>, i1 immarg, <2 x i1>, i32)
@@ -13,6 +17,12 @@ define <2 x i8> @vp_abs_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v2i8:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <2 x i8> @llvm.vp.abs.v2i8(<2 x i8> %va, i1 false, <2 x i1> %m, i32 %evl)
ret <2 x i8> %v
}
@@ -24,6 +34,12 @@ define <2 x i8> @vp_abs_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v2i8_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <2 x i8> @llvm.vp.abs.v2i8(<2 x i8> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x i8> %v
}
@@ -37,6 +53,12 @@ define <4 x i8> @vp_abs_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v4i8:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <4 x i8> @llvm.vp.abs.v4i8(<4 x i8> %va, i1 false, <4 x i1> %m, i32 %evl)
ret <4 x i8> %v
}
@@ -48,6 +70,12 @@ define <4 x i8> @vp_abs_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v4i8_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <4 x i8> @llvm.vp.abs.v4i8(<4 x i8> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i8> %v
}
@@ -61,6 +89,12 @@ define <8 x i8> @vp_abs_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v8i8:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <8 x i8> @llvm.vp.abs.v8i8(<8 x i8> %va, i1 false, <8 x i1> %m, i32 %evl)
ret <8 x i8> %v
}
@@ -72,6 +106,12 @@ define <8 x i8> @vp_abs_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v8i8_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <8 x i8> @llvm.vp.abs.v8i8(<8 x i8> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x i8> %v
}
@@ -85,6 +125,12 @@ define <16 x i8> @vp_abs_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v16i8:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <16 x i8> @llvm.vp.abs.v16i8(<16 x i8> %va, i1 false, <16 x i1> %m, i32 %evl)
ret <16 x i8> %v
}
@@ -96,6 +142,12 @@ define <16 x i8> @vp_abs_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v16i8_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <16 x i8> @llvm.vp.abs.v16i8(<16 x i8> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x i8> %v
}
@@ -109,6 +161,12 @@ define <2 x i16> @vp_abs_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v2i16:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <2 x i16> @llvm.vp.abs.v2i16(<2 x i16> %va, i1 false, <2 x i1> %m, i32 %evl)
ret <2 x i16> %v
}
@@ -120,6 +178,12 @@ define <2 x i16> @vp_abs_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v2i16_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <2 x i16> @llvm.vp.abs.v2i16(<2 x i16> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x i16> %v
}
@@ -133,6 +197,12 @@ define <4 x i16> @vp_abs_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v4i16:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <4 x i16> @llvm.vp.abs.v4i16(<4 x i16> %va, i1 false, <4 x i1> %m, i32 %evl)
ret <4 x i16> %v
}
@@ -144,6 +214,12 @@ define <4 x i16> @vp_abs_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v4i16_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <4 x i16> @llvm.vp.abs.v4i16(<4 x i16> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i16> %v
}
@@ -157,6 +233,12 @@ define <8 x i16> @vp_abs_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v8i16:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <8 x i16> @llvm.vp.abs.v8i16(<8 x i16> %va, i1 false, <8 x i1> %m, i32 %evl)
ret <8 x i16> %v
}
@@ -168,6 +250,12 @@ define <8 x i16> @vp_abs_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v8i16_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <8 x i16> @llvm.vp.abs.v8i16(<8 x i16> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x i16> %v
}
@@ -181,6 +269,12 @@ define <16 x i16> @vp_abs_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl)
; CHECK-NEXT: vrsub.vi v10, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v16i16:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <16 x i16> @llvm.vp.abs.v16i16(<16 x i16> %va, i1 false, <16 x i1> %m, i32 %evl)
ret <16 x i16> %v
}
@@ -192,6 +286,12 @@ define <16 x i16> @vp_abs_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v10, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v16i16_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <16 x i16> @llvm.vp.abs.v16i16(<16 x i16> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x i16> %v
}
@@ -205,6 +305,12 @@ define <2 x i32> @vp_abs_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v2i32:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <2 x i32> @llvm.vp.abs.v2i32(<2 x i32> %va, i1 false, <2 x i1> %m, i32 %evl)
ret <2 x i32> %v
}
@@ -216,6 +322,12 @@ define <2 x i32> @vp_abs_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v2i32_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <2 x i32> @llvm.vp.abs.v2i32(<2 x i32> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x i32> %v
}
@@ -229,6 +341,12 @@ define <4 x i32> @vp_abs_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v4i32:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <4 x i32> @llvm.vp.abs.v4i32(<4 x i32> %va, i1 false, <4 x i1> %m, i32 %evl)
ret <4 x i32> %v
}
@@ -240,6 +358,12 @@ define <4 x i32> @vp_abs_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v4i32_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <4 x i32> @llvm.vp.abs.v4i32(<4 x i32> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i32> %v
}
@@ -253,6 +377,12 @@ define <8 x i32> @vp_abs_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v10, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v8i32:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <8 x i32> @llvm.vp.abs.v8i32(<8 x i32> %va, i1 false, <8 x i1> %m, i32 %evl)
ret <8 x i32> %v
}
@@ -264,6 +394,12 @@ define <8 x i32> @vp_abs_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v10, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v8i32_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <8 x i32> @llvm.vp.abs.v8i32(<8 x i32> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x i32> %v
}
@@ -277,6 +413,12 @@ define <16 x i32> @vp_abs_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl)
; CHECK-NEXT: vrsub.vi v12, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v16i32:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <16 x i32> @llvm.vp.abs.v16i32(<16 x i32> %va, i1 false, <16 x i1> %m, i32 %evl)
ret <16 x i32> %v
}
@@ -288,6 +430,12 @@ define <16 x i32> @vp_abs_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v12, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v12
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v16i32_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <16 x i32> @llvm.vp.abs.v16i32(<16 x i32> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x i32> %v
}
@@ -301,6 +449,12 @@ define <2 x i64> @vp_abs_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v2i64:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <2 x i64> @llvm.vp.abs.v2i64(<2 x i64> %va, i1 false, <2 x i1> %m, i32 %evl)
ret <2 x i64> %v
}
@@ -312,6 +466,12 @@ define <2 x i64> @vp_abs_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v2i64_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <2 x i64> @llvm.vp.abs.v2i64(<2 x i64> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x i64> %v
}
@@ -325,6 +485,12 @@ define <4 x i64> @vp_abs_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v10, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v4i64:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <4 x i64> @llvm.vp.abs.v4i64(<4 x i64> %va, i1 false, <4 x i1> %m, i32 %evl)
ret <4 x i64> %v
}
@@ -336,6 +502,12 @@ define <4 x i64> @vp_abs_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v10, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v10
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v4i64_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <4 x i64> @llvm.vp.abs.v4i64(<4 x i64> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i64> %v
}
@@ -349,6 +521,12 @@ define <8 x i64> @vp_abs_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v12, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v8i64:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <8 x i64> @llvm.vp.abs.v8i64(<8 x i64> %va, i1 false, <8 x i1> %m, i32 %evl)
ret <8 x i64> %v
}
@@ -360,6 +538,12 @@ define <8 x i64> @vp_abs_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v12, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v12
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v8i64_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <8 x i64> @llvm.vp.abs.v8i64(<8 x i64> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x i64> %v
}
@@ -373,6 +557,12 @@ define <15 x i64> @vp_abs_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl)
; CHECK-NEXT: vrsub.vi v16, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v15i64:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <15 x i64> @llvm.vp.abs.v15i64(<15 x i64> %va, i1 false, <15 x i1> %m, i32 %evl)
ret <15 x i64> %v
}
@@ -384,6 +574,12 @@ define <15 x i64> @vp_abs_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v16, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v16
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v15i64_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <15 x i64> @llvm.vp.abs.v15i64(<15 x i64> %va, i1 false, <15 x i1> splat (i1 true), i32 %evl)
ret <15 x i64> %v
}
@@ -397,6 +593,12 @@ define <16 x i64> @vp_abs_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl)
; CHECK-NEXT: vrsub.vi v16, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v16i64:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: ret
%v = call <16 x i64> @llvm.vp.abs.v16i64(<16 x i64> %va, i1 false, <16 x i1> %m, i32 %evl)
ret <16 x i64> %v
}
@@ -408,6 +610,12 @@ define <16 x i64> @vp_abs_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v16, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v16
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v16i64_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: ret
%v = call <16 x i64> @llvm.vp.abs.v16i64(<16 x i64> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x i64> %v
}
@@ -437,6 +645,27 @@ define <32 x i64> @vp_abs_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl)
; CHECK-NEXT: vrsub.vi v24, v16, 0, v0.t
; CHECK-NEXT: vmax.vv v16, v16, v24, v0.t
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v32i64:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: li a2, 16
+; ZVABD-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; ZVABD-NEXT: vslidedown.vi v24, v0, 2
+; ZVABD-NEXT: mv a1, a0
+; ZVABD-NEXT: bltu a0, a2, .LBB34_2
+; ZVABD-NEXT: # %bb.1:
+; ZVABD-NEXT: li a1, 16
+; ZVABD-NEXT: .LBB34_2:
+; ZVABD-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8, v0.t
+; ZVABD-NEXT: addi a1, a0, -16
+; ZVABD-NEXT: sltu a0, a0, a1
+; ZVABD-NEXT: addi a0, a0, -1
+; ZVABD-NEXT: and a0, a0, a1
+; ZVABD-NEXT: vmv1r.v v0, v24
+; ZVABD-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT: vabs.v v16, v16, v0.t
+; ZVABD-NEXT: ret
%v = call <32 x i64> @llvm.vp.abs.v32i64(<32 x i64> %va, i1 false, <32 x i1> %m, i32 %evl)
ret <32 x i64> %v
}
@@ -461,6 +690,24 @@ define <32 x i64> @vp_abs_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
; CHECK-NEXT: vrsub.vi v24, v16, 0
; CHECK-NEXT: vmax.vv v16, v16, v24
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: vp_abs_v32i64_unmasked:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: li a2, 16
+; ZVABD-NEXT: mv a1, a0
+; ZVABD-NEXT: bltu a0, a2, .LBB35_2
+; ZVABD-NEXT: # %bb.1:
+; ZVABD-NEXT: li a1, 16
+; ZVABD-NEXT: .LBB35_2:
+; ZVABD-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: addi a1, a0, -16
+; ZVABD-NEXT: sltu a0, a0, a1
+; ZVABD-NEXT: addi a0, a0, -1
+; ZVABD-NEXT: and a0, a0, a1
+; ZVABD-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT: vabs.v v16, v16
+; ZVABD-NEXT: ret
%v = call <32 x i64> @llvm.vp.abs.v32i64(<32 x i64> %va, i1 false, <32 x i1> splat (i1 true), i32 %evl)
ret <32 x i64> %v
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
index 3153b44386d7aed..1b4f9ca6a1bc37f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
@@ -1,6 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=ZVABD
+; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=ZVABD
define void @abs_v16i8(ptr %x) {
; CHECK-LABEL: abs_v16i8:
@@ -11,6 +15,14 @@ define void @abs_v16i8(ptr %x) {
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: abs_v16i8:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT: vle8.v v8, (a0)
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: vse8.v v8, (a0)
+; ZVABD-NEXT: ret
%a = load <16 x i8>, ptr %x
%b = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %a, i1 false)
store <16 x i8> %b, ptr %x
@@ -27,6 +39,14 @@ define void @abs_v8i16(ptr %x) {
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: abs_v8i16:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZVABD-NEXT: vle16.v v8, (a0)
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: vse16.v v8, (a0)
+; ZVABD-NEXT: ret
%a = load <8 x i16>, ptr %x
%b = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %a, i1 false)
store <8 x i16> %b, ptr %x
@@ -43,6 +63,14 @@ define void @abs_v6i16(ptr %x) {
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: abs_v6i16:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; ZVABD-NEXT: vle16.v v8, (a0)
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: vse16.v v8, (a0)
+; ZVABD-NEXT: ret
%a = load <6 x i16>, ptr %x
%b = call <6 x i16> @llvm.abs.v6i16(<6 x i16> %a, i1 false)
store <6 x i16> %b, ptr %x
@@ -59,6 +87,14 @@ define void @abs_v4i32(ptr %x) {
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: abs_v4i32:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; ZVABD-NEXT: vle32.v v8, (a0)
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: vse32.v v8, (a0)
+; ZVABD-NEXT: ret
%a = load <4 x i32>, ptr %x
%b = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %a, i1 false)
store <4 x i32> %b, ptr %x
@@ -75,6 +111,14 @@ define void @abs_v2i64(ptr %x) {
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: abs_v2i64:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; ZVABD-NEXT: vle64.v v8, (a0)
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: vse64.v v8, (a0)
+; ZVABD-NEXT: ret
%a = load <2 x i64>, ptr %x
%b = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %a, i1 false)
store <2 x i64> %b, ptr %x
@@ -92,6 +136,15 @@ define void @abs_v32i8(ptr %x) {
; CHECK-NEXT: vmax.vv v8, v8, v10
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: abs_v32i8:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: li a1, 32
+; ZVABD-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; ZVABD-NEXT: vle8.v v8, (a0)
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: vse8.v v8, (a0)
+; ZVABD-NEXT: ret
%a = load <32 x i8>, ptr %x
%b = call <32 x i8> @llvm.abs.v32i8(<32 x i8> %a, i1 false)
store <32 x i8> %b, ptr %x
@@ -108,6 +161,14 @@ define void @abs_v16i16(ptr %x) {
; CHECK-NEXT: vmax.vv v8, v8, v10
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: abs_v16i16:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; ZVABD-NEXT: vle16.v v8, (a0)
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: vse16.v v8, (a0)
+; ZVABD-NEXT: ret
%a = load <16 x i16>, ptr %x
%b = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %a, i1 false)
store <16 x i16> %b, ptr %x
@@ -124,6 +185,14 @@ define void @abs_v8i32(ptr %x) {
; CHECK-NEXT: vmax.vv v8, v8, v10
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: abs_v8i32:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; ZVABD-NEXT: vle32.v v8, (a0)
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: vse32.v v8, (a0)
+; ZVABD-NEXT: ret
%a = load <8 x i32>, ptr %x
%b = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %a, i1 false)
store <8 x i32> %b, ptr %x
@@ -140,6 +209,14 @@ define void @abs_v4i64(ptr %x) {
; CHECK-NEXT: vmax.vv v8, v8, v10
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: abs_v4i64:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; ZVABD-NEXT: vle64.v v8, (a0)
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: vse64.v v8, (a0)
+; ZVABD-NEXT: ret
%a = load <4 x i64>, ptr %x
%b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a, i1 false)
store <4 x i64> %b, ptr %x
@@ -158,6 +235,16 @@ define void @abs_v4i64_of_sext_v4i8(ptr %x) {
; CHECK-NEXT: vzext.vf8 v10, v8
; CHECK-NEXT: vse64.v v10, (a0)
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: abs_v4i64_of_sext_v4i8:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; ZVABD-NEXT: vle8.v v8, (a0)
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; ZVABD-NEXT: vzext.vf8 v10, v8
+; ZVABD-NEXT: vse64.v v10, (a0)
+; ZVABD-NEXT: ret
%a = load <4 x i8>, ptr %x
%a.ext = sext <4 x i8> %a to <4 x i64>
%b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a.ext, i1 false)
@@ -176,6 +263,16 @@ define void @abs_v4i64_of_sext_v4i16(ptr %x) {
; CHECK-NEXT: vzext.vf4 v10, v8
; CHECK-NEXT: vse64.v v10, (a0)
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: abs_v4i64_of_sext_v4i16:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; ZVABD-NEXT: vle16.v v8, (a0)
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; ZVABD-NEXT: vzext.vf4 v10, v8
+; ZVABD-NEXT: vse64.v v10, (a0)
+; ZVABD-NEXT: ret
%a = load <4 x i16>, ptr %x
%a.ext = sext <4 x i16> %a to <4 x i64>
%b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a.ext, i1 false)
@@ -194,6 +291,16 @@ define void @abs_v4i64_of_sext_v4i32(ptr %x) {
; CHECK-NEXT: vzext.vf2 v10, v8
; CHECK-NEXT: vse64.v v10, (a0)
; CHECK-NEXT: ret
+;
+; ZVABD-LABEL: abs_v4i64_of_sext_v4i32:
+; ZVABD: # %bb.0:
+; ZVABD-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; ZVABD-NEXT: vle32.v v8, (a0)
+; ZVABD-NEXT: vabs.v v8, v8
+; ZVABD-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; ZVABD-NEXT: vzext.vf2 v10, v8
+; ZVABD-NEXT: vse64.v v10, (a0)
+; ZVABD-NEXT: ret
%a = load <4 x i32>, ptr %x
%a.ext = sext <4 x i32> %a to <4 x i64>
%b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a.ext, i1 false)
>From 2439f4b0e39ab538b9b5a58a79501fb4b9924e46 Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Thu, 6 Feb 2025 16:16:04 +0800
Subject: [PATCH 4/4] [RISCV][CodeGen] Combine vwaddu+vabd(u) to vwabdacc(u)
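
The combine rewrites a vwaddu whose summand is produced by vabd/vabdu
into the accumulating forms vwabdacc/vwabdaccu. It only fires when the
vwaddu node's passthru operand is undef; the other operand is
zero-extended and used as the accumulator.

A minimal sketch of the kind of IR this targets (illustrative only,
mirroring the SAD reduction in fixed-vectors-sad.ll; @two_block_abd is
a made-up name, not a test from this patch):

  declare <16 x i8> @llvm.abdu.v16i8(<16 x i8>, <16 x i8>)

  define <16 x i16> @two_block_abd(<16 x i8> %a, <16 x i8> %b,
                                   <16 x i8> %c, <16 x i8> %d) {
    %ab = call <16 x i8> @llvm.abdu.v16i8(<16 x i8> %a, <16 x i8> %b)
    %cd = call <16 x i8> @llvm.abdu.v16i8(<16 x i8> %c, <16 x i8> %d)
    %ab.w = zext <16 x i8> %ab to <16 x i16>
    %cd.w = zext <16 x i8> %cd to <16 x i16>
    ; selected as vwaddu.vv of the two differences before this patch
    %sum = add <16 x i16> %ab.w, %cd.w
    ret <16 x i16> %sum
  }

With Zvabd, one absolute difference folds into a single vwabdaccu.vv
that accumulates into the zero-extended result of the other, saving
the separate vwaddu.vv.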
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 46 ++++++++++++++++++-
llvm/lib/Target/RISCV/RISCVISelLowering.h | 5 +-
llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td | 24 ++++++++++
.../CodeGen/RISCV/rvv/fixed-vectors-sad.ll | 20 ++++----
4 files changed, 83 insertions(+), 12 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 7fe2d4aa87566f9..bdb58d0e8194442 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -6558,7 +6558,7 @@ static bool hasPassthruOp(unsigned Opcode) {
Opcode <= RISCVISD::LAST_STRICTFP_OPCODE &&
"not a RISC-V target specific op");
static_assert(
- RISCVISD::LAST_VL_VECTOR_OP - RISCVISD::FIRST_VL_VECTOR_OP == 129 &&
+ RISCVISD::LAST_VL_VECTOR_OP - RISCVISD::FIRST_VL_VECTOR_OP == 131 &&
RISCVISD::LAST_STRICTFP_OPCODE - RISCVISD::FIRST_STRICTFP_OPCODE == 21 &&
"adding target specific op should update this function");
if (Opcode >= RISCVISD::ADD_VL && Opcode <= RISCVISD::VFMAX_VL)
@@ -6582,7 +6582,7 @@ static bool hasMaskOp(unsigned Opcode) {
Opcode <= RISCVISD::LAST_STRICTFP_OPCODE &&
"not a RISC-V target specific op");
static_assert(
- RISCVISD::LAST_VL_VECTOR_OP - RISCVISD::FIRST_VL_VECTOR_OP == 129 &&
+ RISCVISD::LAST_VL_VECTOR_OP - RISCVISD::FIRST_VL_VECTOR_OP == 131 &&
RISCVISD::LAST_STRICTFP_OPCODE - RISCVISD::FIRST_STRICTFP_OPCODE == 21 &&
"adding target specific op should update this function");
if (Opcode >= RISCVISD::TRUNCATE_VECTOR_VL && Opcode <= RISCVISD::SETCC_VL)
@@ -15979,6 +15979,44 @@ static SDValue combineVWADDSUBWSelect(SDNode *N, SelectionDAG &DAG) {
N->getFlags());
}
+// vwaddu(C, vabd(A, B))  -> vwabdacc(A, B, C)
+// vwaddu(C, vabdu(A, B)) -> vwabdaccu(A, B, C)
+static SDValue performVWABDACCCombine(SDNode *N, SelectionDAG &DAG,
+ const RISCVSubtarget &Subtarget) {
+ if (!Subtarget.hasStdExtZvabd())
+    return SDValue();
+
+ SDValue Op0 = N->getOperand(0);
+ SDValue Op1 = N->getOperand(1);
+ SDValue Passthru = N->getOperand(2);
+ if (!Passthru->isUndef())
+ return SDValue();
+
+ SDValue Mask = N->getOperand(3);
+ SDValue VL = N->getOperand(4);
+ auto IsABD = [](SDValue Op) {
+ if (Op->getOpcode() != RISCVISD::ABDS_VL &&
+ Op->getOpcode() != RISCVISD::ABDU_VL)
+ return SDValue();
+ return Op;
+ };
+
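+  // Identify the absolute-difference operand; the remaining operand becomes
+  // the accumulator and is zero-extended to the widened result type below.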
+ SDValue Diff = IsABD(Op0);
+ Diff = Diff ? IsABD(Op1) : Diff;
+ if (!Diff)
+ return SDValue();
+ SDValue Acc = Diff == Op0 ? Op1 : Op0;
+
+ SDLoc DL(N);
+ MVT VT = N->getSimpleValueType(0);
+ Acc = DAG.getNode(RISCVISD::VZEXT_VL, DL, VT, Acc, Mask, VL);
+ SDValue Result = DAG.getNode(
+ Diff.getOpcode() == RISCVISD::ABDS_VL ? RISCVISD::VWABDACC_VL
+ : RISCVISD::VWABDACCU_VL,
+ DL, VT, Diff.getOperand(0), Diff.getOperand(1), Acc, Mask, VL);
+ return Result;
+}
+
static SDValue performVWADDSUBW_VLCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
const RISCVSubtarget &Subtarget) {
@@ -18579,6 +18617,8 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
if (SDValue V = combineOp_VLToVWOp_VL(N, DCI, Subtarget))
return V;
return combineToVWMACC(N, DAG, Subtarget);
+ case RISCVISD::VWADDU_VL:
+ return performVWABDACCCombine(N, DAG, Subtarget);
case RISCVISD::VWADD_W_VL:
case RISCVISD::VWADDU_W_VL:
case RISCVISD::VWSUB_W_VL:
@@ -21332,6 +21372,8 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(VFIRST_VL)
NODE_NAME_CASE(ABDS_VL)
NODE_NAME_CASE(ABDU_VL)
+ NODE_NAME_CASE(VWABDACC_VL)
+ NODE_NAME_CASE(VWABDACCU_VL)
NODE_NAME_CASE(READ_CSR)
NODE_NAME_CASE(WRITE_CSR)
NODE_NAME_CASE(SWAP_CSR)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index bacbd7c2040fc94..1fc39701ab0f365 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -406,7 +406,10 @@ enum NodeType : unsigned {
// vfirst.m with additional mask and VL operands.
VFIRST_VL,
- LAST_VL_VECTOR_OP = VFIRST_VL,
+ // Vector Absolute Difference and Accumulate
+ VWABDACC_VL,
+ VWABDACCU_VL,
+ LAST_VL_VECTOR_OP = VWABDACCU_VL,
// Read VLENB CSR
READ_VLENB,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
index 8fcc4a4f7b963ed..034033f4223c55b 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
@@ -30,9 +30,25 @@ let Predicates = [HasStdExtZvabd] in {
//===----------------------------------------------------------------------===//
// Pseudos
//===----------------------------------------------------------------------===//
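+// Widening ternary pseudos for vwabdacc/vwabdaccu. Scheduling information
+// reuses the widening multiply-add (VIWMulAdd) resources for now.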
+multiclass VPseudoVWABD_VV_VX {
+ foreach m = MxListW in {
+ defvar mx = m.MX;
+ defm "" : VPseudoTernaryW_VV<m, Commutable = 1>,
+ SchedTernary<"WriteVIWMulAddV", "ReadVIWMulAddV",
+ "ReadVIWMulAddV", "ReadVIWMulAddV", mx>;
+ defm "" : VPseudoTernaryW_VX<m>,
+ SchedTernary<"WriteVIWMulAddX", "ReadVIWMulAddV",
+ "ReadVIWMulAddX", "ReadVIWMulAddV", mx>;
+ }
+}
+
let Predicates = [HasStdExtZvabd] in {
defm PseudoVABD : VPseudoVALU_VV_VX<Commutable = 1>;
defm PseudoVABDU : VPseudoVALU_VV_VX<Commutable = 1>;
+ let IsRVVWideningReduction = 1 in {
+ defm PseudoVWABDACC : VPseudoVWABD_VV_VX;
+ defm PseudoVWABDACCU : VPseudoVWABD_VV_VX;
+ } // IsRVVWideningReduction = 1
} // Predicates = [HasStdExtZvabd]
//===----------------------------------------------------------------------===//
@@ -43,6 +59,11 @@ def riscv_abds_vl
def riscv_abdu_vl
: SDNode<"RISCVISD::ABDU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
+def rvv_vwabdacc_vl : SDNode<"RISCVISD::VWABDACC_VL",
+ SDT_RISCVVWIntTernOp_VL, [SDNPCommutative]>;
+def rvv_vwabdaccu_vl : SDNode<"RISCVISD::VWABDACCU_VL",
+ SDT_RISCVVWIntTernOp_VL, [SDNPCommutative]>;
+
let Predicates = [HasStdExtZvabd] in {
defm : VPatBinarySDNode_VV_VX<abds, "PseudoVABD">;
defm : VPatBinarySDNode_VV_VX<abdu, "PseudoVABDU">;
@@ -56,4 +77,7 @@ foreach vti = AllIntegerVectors in {
defm : VPatBinaryVL_VV_VX<riscv_abds_vl, "PseudoVABD">;
defm : VPatBinaryVL_VV_VX<riscv_abdu_vl, "PseudoVABDU">;
+
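+// Select the accumulate forms through the existing widening multiply-add
+// pattern helpers, which have the same wide-ternary operand shape.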
+defm : VPatWidenMultiplyAddVL_VV_VX<rvv_vwabdacc_vl, "PseudoVWABDACC">;
+defm : VPatWidenMultiplyAddVL_VV_VX<rvv_vwabdaccu_vl, "PseudoVWABDACCU">;
} // Predicates = [HasStdExtZvabd]
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
index 62ec0543949a0dc..cd79ff5f5dca47e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
@@ -199,23 +199,25 @@ define signext i32 @sad_2block_16xi8_as_i32(ptr %a, ptr %b, i32 signext %stridea
; ZVABD-NEXT: vle8.v v13, (a1)
; ZVABD-NEXT: add a0, a0, a2
; ZVABD-NEXT: add a1, a1, a3
-; ZVABD-NEXT: vabdu.vv v8, v8, v9
-; ZVABD-NEXT: vle8.v v9, (a0)
+; ZVABD-NEXT: vle8.v v14, (a0)
+; ZVABD-NEXT: vle8.v v15, (a1)
; ZVABD-NEXT: vabdu.vv v10, v10, v11
-; ZVABD-NEXT: vle8.v v11, (a1)
-; ZVABD-NEXT: vwaddu.vv v14, v10, v8
+; ZVABD-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT: vzext.vf2 v16, v10
+; ZVABD-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; ZVABD-NEXT: vwabdaccu.vv v16, v8, v9
; ZVABD-NEXT: vabdu.vv v8, v12, v13
; ZVABD-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; ZVABD-NEXT: vzext.vf2 v12, v8
+; ZVABD-NEXT: vzext.vf2 v10, v8
; ZVABD-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; ZVABD-NEXT: vabdu.vv v8, v9, v11
+; ZVABD-NEXT: vabdu.vv v8, v14, v15
; ZVABD-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; ZVABD-NEXT: vwaddu.vv v16, v12, v14
+; ZVABD-NEXT: vwaddu.vv v12, v10, v16
; ZVABD-NEXT: vzext.vf2 v10, v8
-; ZVABD-NEXT: vwaddu.wv v16, v16, v10
+; ZVABD-NEXT: vwaddu.wv v12, v12, v10
; ZVABD-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVABD-NEXT: vmv.s.x v8, zero
-; ZVABD-NEXT: vredsum.vs v8, v16, v8
+; ZVABD-NEXT: vredsum.vs v8, v12, v8
; ZVABD-NEXT: vmv.x.s a0, v8
; ZVABD-NEXT: ret
entry: