[llvm] [RISCV] Combine ({s,u}{div,rem} (zext, zext)) -> (zext ({s,u}{div,rem} (zext, zext))) (PR #86779)
Luke Lau via llvm-commits
llvm-commits at lists.llvm.org
Wed Mar 27 00:37:17 PDT 2024
https://github.com/lukel97 created https://github.com/llvm/llvm-project/pull/86779
This narrows unsigned and signed div and rem nodes via combineBinOpOfZExt.
Unlike other binary ops, there are no widening div or rem instructions, so we end up with an extra vzext.vf2 to extend the narrow result back to the original element width.
However, I'm assuming that div/rem are expensive enough that performing them at half the EMUL wins back the cost of that extra extend.
Alive2 proof: https://alive2.llvm.org/ce/z/Et_L6y
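For illustration, here is the shape of the transform written as LLVM IR (the combine itself runs on SelectionDAG nodes, and these value names are made up for the sketch). A udiv result can never exceed its dividend, so the operation fits in the narrower type:

  ; before: udiv performed at SEW=32 (e32/m4 in the tests below)
  %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
  %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
  %r = udiv <vscale x 8 x i32> %a.zext, %b.zext

  ; after: udiv performed at SEW=16 (e16/m2), plus one extra zext back to i32
  %a.ext = zext <vscale x 8 x i8> %a to <vscale x 8 x i16>
  %b.ext = zext <vscale x 8 x i8> %b to <vscale x 8 x i16>
  %narrow = udiv <vscale x 8 x i16> %a.ext, %b.ext
  %r = zext <vscale x 8 x i16> %narrow to <vscale x 8 x i32>

(Note that the tests below select vdivu/vremu even for sdiv/srem, since both zext'd operands are known non-negative.)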
From 5af5419faed85117f12375207b6575e56b62817d Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Wed, 27 Mar 2024 15:27:13 +0800
Subject: [PATCH] [RISCV] Combine ({s,u}{div,rem} (zext, zext)) -> (zext
({s,u}{div,rem} (zext, zext)))
This narrows unsigned and signed div and rem nodes via combineBinOpOfZExt.
Unlike other binary ops, there are no widening div or rem instructions, so we end up with an extra vzext.vf2 to extend the narrow result back to the original element width.
However, I'm assuming that div/rem are expensive enough that performing them at half the EMUL wins back the cost of that extra extend.
Alive2 proof: https://alive2.llvm.org/ce/z/Et_L6y
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 14 ++++++--
llvm/test/CodeGen/RISCV/rvv/binop-zext.ll | 40 ++++++++++++---------
2 files changed, 36 insertions(+), 18 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index ca78648c6aa9d8..192a9da970a220 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1427,6 +1427,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
ISD::SHL, ISD::STORE, ISD::SPLAT_VECTOR,
ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS,
ISD::EXPERIMENTAL_VP_REVERSE, ISD::MUL,
+ ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM,
ISD::INSERT_VECTOR_ELT, ISD::ABS});
if (Subtarget.hasVendorXTHeadMemPair())
setTargetDAGCombine({ISD::LOAD, ISD::STORE});
@@ -12936,12 +12937,14 @@ static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG,
// add (zext, zext) -> zext (add (zext, zext))
// sub (zext, zext) -> sext (sub (zext, zext))
// mul (zext, zext) -> zext (mul (zext, zext))
+// sdiv (zext, zext) -> zext (sdiv (zext, zext))
+// udiv (zext, zext) -> zext (udiv (zext, zext))
+// srem (zext, zext) -> zext (srem (zext, zext))
+// urem (zext, zext) -> zext (urem (zext, zext))
//
// where the sum of the extend widths matches, and the range of the bin op
// fits inside the width of the narrower bin op. (For profitability on rvv, we
// use a power of two for both inner and outer extend.)
-//
-// TODO: Extend this to other binary ops
static SDValue combineBinOpOfZExt(SDNode *N, SelectionDAG &DAG) {
EVT VT = N->getValueType(0);
@@ -15894,6 +15897,13 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
if (SDValue V = combineBinOp_VLToVWBinOp_VL(N, DCI, Subtarget))
return V;
return performMULCombine(N, DAG);
+ case ISD::SDIV:
+ case ISD::UDIV:
+ case ISD::SREM:
+ case ISD::UREM:
+ if (SDValue V = combineBinOpOfZExt(N, DAG))
+ return V;
+ break;
case ISD::FADD:
case ISD::UMAX:
case ISD::UMIN:
diff --git a/llvm/test/CodeGen/RISCV/rvv/binop-zext.ll b/llvm/test/CodeGen/RISCV/rvv/binop-zext.ll
index e050240f0de114..2d5258fbabfac3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/binop-zext.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/binop-zext.ll
@@ -50,10 +50,12 @@ define <vscale x 8 x i32> @mul(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
define <vscale x 8 x i32> @sdiv(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
; CHECK-LABEL: sdiv:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v8
-; CHECK-NEXT: vzext.vf4 v16, v9
-; CHECK-NEXT: vdivu.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vzext.vf2 v12, v8
+; CHECK-NEXT: vdivu.vv v12, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
; CHECK-NEXT: ret
%a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
%b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
@@ -64,10 +66,12 @@ define <vscale x 8 x i32> @sdiv(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
define <vscale x 8 x i32> @udiv(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
; CHECK-LABEL: udiv:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v8
-; CHECK-NEXT: vzext.vf4 v16, v9
-; CHECK-NEXT: vdivu.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vzext.vf2 v12, v8
+; CHECK-NEXT: vdivu.vv v12, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
; CHECK-NEXT: ret
%a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
%b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
@@ -78,10 +82,12 @@ define <vscale x 8 x i32> @udiv(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
define <vscale x 8 x i32> @srem(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
; CHECK-LABEL: srem:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v8
-; CHECK-NEXT: vzext.vf4 v16, v9
-; CHECK-NEXT: vremu.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vzext.vf2 v12, v8
+; CHECK-NEXT: vremu.vv v12, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
; CHECK-NEXT: ret
%a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
%b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
@@ -92,10 +98,12 @@ define <vscale x 8 x i32> @srem(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
define <vscale x 8 x i32> @urem(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
; CHECK-LABEL: urem:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v8
-; CHECK-NEXT: vzext.vf4 v16, v9
-; CHECK-NEXT: vremu.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vzext.vf2 v12, v8
+; CHECK-NEXT: vremu.vv v12, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
; CHECK-NEXT: ret
%a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
%b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>