[llvm] 32b9961 - [RISCV] Custom promote i32 UADDSAT/USUBSAT for -riscv-experimental-rv64-legal-i32 with Zbb.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Sun Feb 4 12:42:58 PST 2024
Author: Craig Topper
Date: 2024-02-04T12:39:13-08:00
New Revision: 32b99617acbc4773caee45df10a7fd602b8db0ff
URL: https://github.com/llvm/llvm-project/commit/32b99617acbc4773caee45df10a7fd602b8db0ff
DIFF: https://github.com/llvm/llvm-project/commit/32b99617acbc4773caee45df10a7fd602b8db0ff.diff
LOG: [RISCV] Custom promote i32 UADDSAT/USUBSAT for -riscv-experimental-rv64-legal-i32 with Zbb.
Added:
llvm/test/CodeGen/RISCV/rv64-legal-i32/uadd_sat.ll
llvm/test/CodeGen/RISCV/rv64-legal-i32/uadd_sat_plus.ll
llvm/test/CodeGen/RISCV/rv64-legal-i32/usub_sat.ll
llvm/test/CodeGen/RISCV/rv64-legal-i32/usub_sat_plus.ll
Modified:
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index eca560f08e22f..5786ad9ad6482 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -282,8 +282,11 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
MVT::i32, Custom);
setOperationAction({ISD::UADDO, ISD::USUBO, ISD::UADDSAT, ISD::USUBSAT},
MVT::i32, Custom);
- } else
+ } else {
setOperationAction(ISD::SSUBO, MVT::i32, Custom);
+ if (Subtarget.hasStdExtZbb())
+ setOperationAction({ISD::UADDSAT, ISD::USUBSAT}, MVT::i32, Custom);
+ }
setOperationAction(ISD::SADDO, MVT::i32, Custom);
} else {
setLibcallName(
@@ -5357,6 +5360,20 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
return Op;
}
+static SDValue lowerUADDSAT_USUBSAT(SDValue Op, SelectionDAG &DAG) {
+ assert(Op.getValueType() == MVT::i32 && RV64LegalI32 &&
+ "Unexpected custom legalisation");
+
+ // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
+ // sign extend allows overflow of the lower 32 bits to be detected on
+ // the promoted size.
+ SDLoc DL(Op);
+ SDValue LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op.getOperand(0));
+ SDValue RHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op.getOperand(1));
+ SDValue WideOp = DAG.getNode(Op.getOpcode(), DL, MVT::i64, LHS, RHS);
+ return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, WideOp);
+}
+
// Custom lower i32 SADDO/SSUBO with RV64LegalI32 so we take advantage of addw.
static SDValue lowerSADDO_SSUBO(SDValue Op, SelectionDAG &DAG) {
assert(Op.getValueType() == MVT::i32 && RV64LegalI32 &&
@@ -5368,7 +5385,8 @@ static SDValue lowerSADDO_SSUBO(SDValue Op, SelectionDAG &DAG) {
SDLoc DL(Op);
SDValue LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op.getOperand(0));
SDValue RHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op.getOperand(1));
- SDValue WideOp = DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
+ SDValue WideOp =
+ DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
SDValue Res = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, WideOp);
SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, WideOp,
DAG.getValueType(MVT::i32));
@@ -6618,14 +6636,17 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
case ISD::AVGFLOORU:
case ISD::AVGCEILU:
case ISD::SADDSAT:
- case ISD::UADDSAT:
case ISD::SSUBSAT:
- case ISD::USUBSAT:
case ISD::SMIN:
case ISD::SMAX:
case ISD::UMIN:
case ISD::UMAX:
return lowerToScalableOp(Op, DAG);
+ case ISD::UADDSAT:
+ case ISD::USUBSAT:
+ if (!Op.getValueType().isVector())
+ return lowerUADDSAT_USUBSAT(Op, DAG);
+ return lowerToScalableOp(Op, DAG);
case ISD::ABS:
case ISD::VP_ABS:
return lowerABS(Op, DAG);
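Editor's note on the comment in lowerUADDSAT_USUBSAT above: it relies on two facts, that sign extension preserves the unsigned order of 32-bit values (any value with bit 31 set maps above any value with it clear), and that ~sext(y) == sext(~y). Together these mean the 64-bit umin/umax-based expansion LegalizeDAG emits yields the correct 32-bit saturated result after truncation. Below is a minimal standalone C++ sketch of that equivalence, with hypothetical helper names and reference implementations; it is an illustration, not part of the patch.

#include <algorithm>
#include <cassert>
#include <cstdint>

static uint32_t ref_uaddsat32(uint32_t X, uint32_t Y) {
  uint64_t S = uint64_t(X) + Y;
  return S > 0xFFFFFFFFu ? 0xFFFFFFFFu : uint32_t(S);
}

static uint32_t ref_usubsat32(uint32_t X, uint32_t Y) {
  return X > Y ? X - Y : 0;
}

// Mirror of the lowering: SIGN_EXTEND both operands to i64, run the
// umin/umax-based expansion at 64 bits, then TRUNCATE back to i32.
static uint64_t sext32(uint32_t V) { return uint64_t(int64_t(int32_t(V))); }

static uint32_t promoted_uaddsat32(uint32_t X, uint32_t Y) {
  uint64_t A = sext32(X), B = sext32(Y);
  return uint32_t(std::min(A, ~B) + B); // not/minu/add(w) with Zbb
}

static uint32_t promoted_usubsat32(uint32_t X, uint32_t Y) {
  uint64_t A = sext32(X), B = sext32(Y);
  return uint32_t(std::max(A, B) - B);  // maxu/sub(w) with Zbb
}

int main() {
  const uint32_t Vals[] = {0, 1, 0x7FFFFFFFu, 0x80000000u, 0xFFFFFFFEu,
                           0xFFFFFFFFu, 12345u, 0xDEADBEEFu};
  for (uint32_t X : Vals)
    for (uint32_t Y : Vals) {
      assert(promoted_uaddsat32(X, Y) == ref_uaddsat32(X, Y));
      assert(promoted_usubsat32(X, Y) == ref_usubsat32(X, Y));
    }
}

Zero extension would preserve unsigned order too, but sign extension is what GPR values already carry in the rv64-legal-i32 scheme, so it avoids extra masking.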
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/uadd_sat.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/uadd_sat.ll
new file mode 100644
index 0000000000000..6b42631896a25
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/uadd_sat.ll
@@ -0,0 +1,120 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m \
+; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s --check-prefix=RV64I
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb \
+; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s --check-prefix=RV64IZbb
+
+declare i4 @llvm.uadd.sat.i4(i4, i4)
+declare i8 @llvm.uadd.sat.i8(i8, i8)
+declare i16 @llvm.uadd.sat.i16(i16, i16)
+declare i32 @llvm.uadd.sat.i32(i32, i32)
+declare i64 @llvm.uadd.sat.i64(i64, i64)
+
+define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
+; RV64I-LABEL: func:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addw a1, a0, a1
+; RV64I-NEXT: sltu a0, a1, a0
+; RV64I-NEXT: negw a0, a0
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IZbb-LABEL: func:
+; RV64IZbb: # %bb.0:
+; RV64IZbb-NEXT: not a2, a1
+; RV64IZbb-NEXT: minu a0, a0, a2
+; RV64IZbb-NEXT: addw a0, a0, a1
+; RV64IZbb-NEXT: ret
+ %tmp = call i32 @llvm.uadd.sat.i32(i32 %x, i32 %y);
+ ret i32 %tmp;
+}
+
+define i64 @func2(i64 %x, i64 %y) nounwind {
+; RV64I-LABEL: func2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: add a1, a0, a1
+; RV64I-NEXT: sltu a0, a1, a0
+; RV64I-NEXT: neg a0, a0
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IZbb-LABEL: func2:
+; RV64IZbb: # %bb.0:
+; RV64IZbb-NEXT: not a2, a1
+; RV64IZbb-NEXT: minu a0, a0, a2
+; RV64IZbb-NEXT: add a0, a0, a1
+; RV64IZbb-NEXT: ret
+ %tmp = call i64 @llvm.uadd.sat.i64(i64 %x, i64 %y);
+ ret i64 %tmp;
+}
+
+define zeroext i16 @func16(i16 zeroext %x, i16 zeroext %y) nounwind {
+; RV64I-LABEL: func16:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addw a0, a0, a1
+; RV64I-NEXT: lui a1, 16
+; RV64I-NEXT: addiw a1, a1, -1
+; RV64I-NEXT: bltu a0, a1, .LBB2_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: mv a0, a1
+; RV64I-NEXT: .LBB2_2:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: ret
+;
+; RV64IZbb-LABEL: func16:
+; RV64IZbb: # %bb.0:
+; RV64IZbb-NEXT: addw a0, a0, a1
+; RV64IZbb-NEXT: lui a1, 16
+; RV64IZbb-NEXT: addiw a1, a1, -1
+; RV64IZbb-NEXT: minu a0, a0, a1
+; RV64IZbb-NEXT: ret
+ %tmp = call i16 @llvm.uadd.sat.i16(i16 %x, i16 %y);
+ ret i16 %tmp;
+}
+
+define zeroext i8 @func8(i8 zeroext %x, i8 zeroext %y) nounwind {
+; RV64I-LABEL: func8:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addw a0, a0, a1
+; RV64I-NEXT: li a1, 255
+; RV64I-NEXT: bltu a0, a1, .LBB3_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: li a0, 255
+; RV64I-NEXT: .LBB3_2:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: ret
+;
+; RV64IZbb-LABEL: func8:
+; RV64IZbb: # %bb.0:
+; RV64IZbb-NEXT: addw a0, a0, a1
+; RV64IZbb-NEXT: li a1, 255
+; RV64IZbb-NEXT: minu a0, a0, a1
+; RV64IZbb-NEXT: ret
+ %tmp = call i8 @llvm.uadd.sat.i8(i8 %x, i8 %y);
+ ret i8 %tmp;
+}
+
+define zeroext i4 @func3(i4 zeroext %x, i4 zeroext %y) nounwind {
+; RV64I-LABEL: func3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addw a0, a0, a1
+; RV64I-NEXT: li a1, 15
+; RV64I-NEXT: bltu a0, a1, .LBB4_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: li a0, 15
+; RV64I-NEXT: .LBB4_2:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: ret
+;
+; RV64IZbb-LABEL: func3:
+; RV64IZbb: # %bb.0:
+; RV64IZbb-NEXT: addw a0, a0, a1
+; RV64IZbb-NEXT: li a1, 15
+; RV64IZbb-NEXT: minu a0, a0, a1
+; RV64IZbb-NEXT: ret
+ %tmp = call i4 @llvm.uadd.sat.i4(i4 %x, i4 %y);
+ ret i4 %tmp;
+}
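For reference, the two check-line patterns in this test correspond to two branchless formulations of 32-bit uadd.sat: the base RV64I sequence (addw/sltu/negw/or) ORs the wrapped sum with an all-ones mask derived from the overflow flag, while the Zbb sequence uses the identity uadd.sat(x, y) == minu(x, ~y) + y. A minimal C++ sketch of both, with made-up names; an illustration, not part of the patch.

#include <cassert>
#include <cstdint>

// RV64I form: addw / sltu / negw / or.
static uint32_t uaddsat_masked(uint32_t X, uint32_t Y) {
  uint32_t Sum = X + Y;   // addw (wraps on overflow)
  uint32_t Ovf = Sum < X; // sltu: 1 iff the add wrapped
  return (0u - Ovf) | Sum; // negw + or: all-ones on overflow
}

// Zbb form: not / minu / addw, i.e. uadd.sat(x, y) = minu(x, ~y) + y.
static uint32_t uaddsat_minu(uint32_t X, uint32_t Y) {
  uint32_t Clamped = X < ~Y ? X : ~Y; // minu clamps x to UINT32_MAX - y
  return Clamped + Y;                 // add can no longer wrap past max
}

int main() {
  assert(uaddsat_masked(0xFFFFFFF0u, 0x20u) == 0xFFFFFFFFu);
  assert(uaddsat_minu(0xFFFFFFF0u, 0x20u) == 0xFFFFFFFFu);
  assert(uaddsat_masked(1u, 2u) == 3u && uaddsat_minu(1u, 2u) == 3u);
}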
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/uadd_sat_plus.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/uadd_sat_plus.ll
new file mode 100644
index 0000000000000..7aad0c3f22c8b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/uadd_sat_plus.ll
@@ -0,0 +1,141 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m \
+; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s --check-prefix=RV64I
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb \
+; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s --check-prefix=RV64IZbb
+
+declare i4 @llvm.uadd.sat.i4(i4, i4)
+declare i8 @llvm.uadd.sat.i8(i8, i8)
+declare i16 @llvm.uadd.sat.i16(i16, i16)
+declare i32 @llvm.uadd.sat.i32(i32, i32)
+declare i64 @llvm.uadd.sat.i64(i64, i64)
+
+define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
+; RV64I-LABEL: func32:
+; RV64I: # %bb.0:
+; RV64I-NEXT: mul a1, a1, a2
+; RV64I-NEXT: addw a1, a0, a1
+; RV64I-NEXT: sext.w a0, a0
+; RV64I-NEXT: sltu a0, a1, a0
+; RV64I-NEXT: neg a0, a0
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IZbb-LABEL: func32:
+; RV64IZbb: # %bb.0:
+; RV64IZbb-NEXT: mulw a1, a1, a2
+; RV64IZbb-NEXT: not a2, a1
+; RV64IZbb-NEXT: sext.w a0, a0
+; RV64IZbb-NEXT: minu a0, a0, a2
+; RV64IZbb-NEXT: add a0, a0, a1
+; RV64IZbb-NEXT: ret
+ %a = mul i32 %y, %z
+ %tmp = call i32 @llvm.uadd.sat.i32(i32 %x, i32 %a)
+ ret i32 %tmp
+}
+
+define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
+; RV64I-LABEL: func64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: add a2, a0, a2
+; RV64I-NEXT: sltu a0, a2, a0
+; RV64I-NEXT: neg a0, a0
+; RV64I-NEXT: or a0, a0, a2
+; RV64I-NEXT: ret
+;
+; RV64IZbb-LABEL: func64:
+; RV64IZbb: # %bb.0:
+; RV64IZbb-NEXT: not a1, a2
+; RV64IZbb-NEXT: minu a0, a0, a1
+; RV64IZbb-NEXT: add a0, a0, a2
+; RV64IZbb-NEXT: ret
+ %a = mul i64 %y, %z
+ %tmp = call i64 @llvm.uadd.sat.i64(i64 %x, i64 %z)
+ ret i64 %tmp
+}
+
+define i16 @func16(i16 %x, i16 %y, i16 %z) nounwind {
+; RV64I-LABEL: func16:
+; RV64I: # %bb.0:
+; RV64I-NEXT: lui a3, 16
+; RV64I-NEXT: addiw a3, a3, -1
+; RV64I-NEXT: and a0, a0, a3
+; RV64I-NEXT: mul a1, a1, a2
+; RV64I-NEXT: and a1, a1, a3
+; RV64I-NEXT: addw a0, a0, a1
+; RV64I-NEXT: bltu a0, a3, .LBB2_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: mv a0, a3
+; RV64I-NEXT: .LBB2_2:
+; RV64I-NEXT: ret
+;
+; RV64IZbb-LABEL: func16:
+; RV64IZbb: # %bb.0:
+; RV64IZbb-NEXT: lui a3, 16
+; RV64IZbb-NEXT: addiw a3, a3, -1
+; RV64IZbb-NEXT: and a0, a0, a3
+; RV64IZbb-NEXT: mul a1, a1, a2
+; RV64IZbb-NEXT: and a1, a1, a3
+; RV64IZbb-NEXT: addw a0, a0, a1
+; RV64IZbb-NEXT: minu a0, a0, a3
+; RV64IZbb-NEXT: ret
+ %a = mul i16 %y, %z
+ %tmp = call i16 @llvm.uadd.sat.i16(i16 %x, i16 %a)
+ ret i16 %tmp
+}
+
+define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
+; RV64I-LABEL: func8:
+; RV64I: # %bb.0:
+; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: mul a1, a1, a2
+; RV64I-NEXT: andi a1, a1, 255
+; RV64I-NEXT: addw a0, a0, a1
+; RV64I-NEXT: li a1, 255
+; RV64I-NEXT: bltu a0, a1, .LBB3_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: li a0, 255
+; RV64I-NEXT: .LBB3_2:
+; RV64I-NEXT: ret
+;
+; RV64IZbb-LABEL: func8:
+; RV64IZbb: # %bb.0:
+; RV64IZbb-NEXT: andi a0, a0, 255
+; RV64IZbb-NEXT: mul a1, a1, a2
+; RV64IZbb-NEXT: andi a1, a1, 255
+; RV64IZbb-NEXT: addw a0, a0, a1
+; RV64IZbb-NEXT: li a1, 255
+; RV64IZbb-NEXT: minu a0, a0, a1
+; RV64IZbb-NEXT: ret
+ %a = mul i8 %y, %z
+ %tmp = call i8 @llvm.uadd.sat.i8(i8 %x, i8 %a)
+ ret i8 %tmp
+}
+
+define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
+; RV64I-LABEL: func4:
+; RV64I: # %bb.0:
+; RV64I-NEXT: andi a0, a0, 15
+; RV64I-NEXT: mul a1, a1, a2
+; RV64I-NEXT: andi a1, a1, 15
+; RV64I-NEXT: addw a0, a0, a1
+; RV64I-NEXT: li a1, 15
+; RV64I-NEXT: bltu a0, a1, .LBB4_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: li a0, 15
+; RV64I-NEXT: .LBB4_2:
+; RV64I-NEXT: ret
+;
+; RV64IZbb-LABEL: func4:
+; RV64IZbb: # %bb.0:
+; RV64IZbb-NEXT: andi a0, a0, 15
+; RV64IZbb-NEXT: mul a1, a1, a2
+; RV64IZbb-NEXT: andi a1, a1, 15
+; RV64IZbb-NEXT: addw a0, a0, a1
+; RV64IZbb-NEXT: li a1, 15
+; RV64IZbb-NEXT: minu a0, a0, a1
+; RV64IZbb-NEXT: ret
+ %a = mul i4 %y, %z
+ %tmp = call i4 @llvm.uadd.sat.i4(i4 %x, i4 %a)
+ ret i4 %tmp
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/usub_sat.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/usub_sat.ll
new file mode 100644
index 0000000000000..beca180785a03
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/usub_sat.ll
@@ -0,0 +1,113 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m \
+; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s --check-prefix=RV64I
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb \
+; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s --check-prefix=RV64IZbb
+
+declare i4 @llvm.usub.sat.i4(i4, i4)
+declare i8 @llvm.usub.sat.i8(i8, i8)
+declare i16 @llvm.usub.sat.i16(i16, i16)
+declare i32 @llvm.usub.sat.i32(i32, i32)
+declare i64 @llvm.usub.sat.i64(i64, i64)
+
+define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
+; RV64I-LABEL: func:
+; RV64I: # %bb.0:
+; RV64I-NEXT: subw a1, a0, a1
+; RV64I-NEXT: sltu a0, a0, a1
+; RV64I-NEXT: addiw a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IZbb-LABEL: func:
+; RV64IZbb: # %bb.0:
+; RV64IZbb-NEXT: maxu a0, a0, a1
+; RV64IZbb-NEXT: subw a0, a0, a1
+; RV64IZbb-NEXT: ret
+ %tmp = call i32 @llvm.usub.sat.i32(i32 %x, i32 %y);
+ ret i32 %tmp;
+}
+
+define i64 @func2(i64 %x, i64 %y) nounwind {
+; RV64I-LABEL: func2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: sub a1, a0, a1
+; RV64I-NEXT: sltu a0, a0, a1
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IZbb-LABEL: func2:
+; RV64IZbb: # %bb.0:
+; RV64IZbb-NEXT: maxu a0, a0, a1
+; RV64IZbb-NEXT: sub a0, a0, a1
+; RV64IZbb-NEXT: ret
+ %tmp = call i64 @llvm.usub.sat.i64(i64 %x, i64 %y);
+ ret i64 %tmp;
+}
+
+define zeroext i16 @func16(i16 zeroext %x, i16 zeroext %y) nounwind {
+; RV64I-LABEL: func16:
+; RV64I: # %bb.0:
+; RV64I-NEXT: subw a1, a0, a1
+; RV64I-NEXT: sltu a0, a0, a1
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: ret
+;
+; RV64IZbb-LABEL: func16:
+; RV64IZbb: # %bb.0:
+; RV64IZbb-NEXT: maxu a0, a0, a1
+; RV64IZbb-NEXT: subw a0, a0, a1
+; RV64IZbb-NEXT: slli a0, a0, 32
+; RV64IZbb-NEXT: srli a0, a0, 32
+; RV64IZbb-NEXT: ret
+ %tmp = call i16 @llvm.usub.sat.i16(i16 %x, i16 %y);
+ ret i16 %tmp;
+}
+
+define zeroext i8 @func8(i8 zeroext %x, i8 zeroext %y) nounwind {
+; RV64I-LABEL: func8:
+; RV64I: # %bb.0:
+; RV64I-NEXT: subw a1, a0, a1
+; RV64I-NEXT: sltu a0, a0, a1
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: ret
+;
+; RV64IZbb-LABEL: func8:
+; RV64IZbb: # %bb.0:
+; RV64IZbb-NEXT: maxu a0, a0, a1
+; RV64IZbb-NEXT: subw a0, a0, a1
+; RV64IZbb-NEXT: slli a0, a0, 32
+; RV64IZbb-NEXT: srli a0, a0, 32
+; RV64IZbb-NEXT: ret
+ %tmp = call i8 @llvm.usub.sat.i8(i8 %x, i8 %y);
+ ret i8 %tmp;
+}
+
+define zeroext i4 @func3(i4 zeroext %x, i4 zeroext %y) nounwind {
+; RV64I-LABEL: func3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: subw a1, a0, a1
+; RV64I-NEXT: sltu a0, a0, a1
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: ret
+;
+; RV64IZbb-LABEL: func3:
+; RV64IZbb: # %bb.0:
+; RV64IZbb-NEXT: maxu a0, a0, a1
+; RV64IZbb-NEXT: subw a0, a0, a1
+; RV64IZbb-NEXT: slli a0, a0, 32
+; RV64IZbb-NEXT: srli a0, a0, 32
+; RV64IZbb-NEXT: ret
+ %tmp = call i4 @llvm.usub.sat.i4(i4 %x, i4 %y);
+ ret i4 %tmp;
+}
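Similarly, the usub.sat patterns in this test come in two branchless forms: the base RV64I sequence (subw/sltu/addi -1/and) ANDs the wrapped difference with a mask that is zero exactly when the subtraction borrowed, and the Zbb sequence uses the identity usub.sat(x, y) == maxu(x, y) - y. A minimal C++ sketch of both, with made-up names; an illustration, not part of the patch.

#include <cassert>
#include <cstdint>

// RV64I form: subw / sltu / addi -1 / and. The borrow test is
// x < (x - y), which holds exactly when the subtract wrapped.
static uint32_t usubsat_masked(uint32_t X, uint32_t Y) {
  uint32_t Diff = X - Y;      // subw (wraps on underflow)
  uint32_t Borrow = X < Diff; // sltu: 1 iff the sub wrapped
  return (Borrow - 1u) & Diff; // addi -1 + and: zero on underflow
}

// Zbb form: maxu / subw, i.e. usub.sat(x, y) = maxu(x, y) - y.
static uint32_t usubsat_maxu(uint32_t X, uint32_t Y) {
  uint32_t M = X > Y ? X : Y; // maxu raises x to at least y
  return M - Y;               // never wraps; 0 when x <= y
}

int main() {
  assert(usubsat_masked(5u, 9u) == 0u && usubsat_maxu(5u, 9u) == 0u);
  assert(usubsat_masked(9u, 5u) == 4u && usubsat_maxu(9u, 5u) == 4u);
}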
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/usub_sat_plus.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/usub_sat_plus.ll
new file mode 100644
index 0000000000000..b95a77dc1f6b2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/usub_sat_plus.ll
@@ -0,0 +1,134 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m \
+; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s --check-prefix=RV64I
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb \
+; RUN: -riscv-experimental-rv64-legal-i32 | FileCheck %s --check-prefix=RV64IZbb
+
+declare i4 @llvm.usub.sat.i4(i4, i4)
+declare i8 @llvm.usub.sat.i8(i8, i8)
+declare i16 @llvm.usub.sat.i16(i16, i16)
+declare i32 @llvm.usub.sat.i32(i32, i32)
+declare i64 @llvm.usub.sat.i64(i64, i64)
+
+define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
+; RV64I-LABEL: func32:
+; RV64I: # %bb.0:
+; RV64I-NEXT: mul a1, a1, a2
+; RV64I-NEXT: subw a1, a0, a1
+; RV64I-NEXT: sext.w a0, a0
+; RV64I-NEXT: sltu a0, a0, a1
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IZbb-LABEL: func32:
+; RV64IZbb: # %bb.0:
+; RV64IZbb-NEXT: mulw a1, a1, a2
+; RV64IZbb-NEXT: sext.w a0, a0
+; RV64IZbb-NEXT: maxu a0, a0, a1
+; RV64IZbb-NEXT: sub a0, a0, a1
+; RV64IZbb-NEXT: ret
+ %a = mul i32 %y, %z
+ %tmp = call i32 @llvm.usub.sat.i32(i32 %x, i32 %a)
+ ret i32 %tmp
+}
+
+define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
+; RV64I-LABEL: func64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: sub a1, a0, a2
+; RV64I-NEXT: sltu a0, a0, a1
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IZbb-LABEL: func64:
+; RV64IZbb: # %bb.0:
+; RV64IZbb-NEXT: maxu a0, a0, a2
+; RV64IZbb-NEXT: sub a0, a0, a2
+; RV64IZbb-NEXT: ret
+ %a = mul i64 %y, %z
+ %tmp = call i64 @llvm.usub.sat.i64(i64 %x, i64 %z)
+ ret i64 %tmp
+}
+
+define i16 @func16(i16 %x, i16 %y, i16 %z) nounwind {
+; RV64I-LABEL: func16:
+; RV64I: # %bb.0:
+; RV64I-NEXT: lui a3, 16
+; RV64I-NEXT: addi a3, a3, -1
+; RV64I-NEXT: and a0, a0, a3
+; RV64I-NEXT: mul a1, a1, a2
+; RV64I-NEXT: and a1, a1, a3
+; RV64I-NEXT: subw a1, a0, a1
+; RV64I-NEXT: sext.w a0, a0
+; RV64I-NEXT: sltu a0, a0, a1
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IZbb-LABEL: func16:
+; RV64IZbb: # %bb.0:
+; RV64IZbb-NEXT: lui a3, 16
+; RV64IZbb-NEXT: addiw a3, a3, -1
+; RV64IZbb-NEXT: and a0, a0, a3
+; RV64IZbb-NEXT: mulw a1, a1, a2
+; RV64IZbb-NEXT: and a1, a1, a3
+; RV64IZbb-NEXT: sext.w a0, a0
+; RV64IZbb-NEXT: maxu a0, a0, a1
+; RV64IZbb-NEXT: sub a0, a0, a1
+; RV64IZbb-NEXT: ret
+ %a = mul i16 %y, %z
+ %tmp = call i16 @llvm.usub.sat.i16(i16 %x, i16 %a)
+ ret i16 %tmp
+}
+
+define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
+; RV64I-LABEL: func8:
+; RV64I: # %bb.0:
+; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: mul a1, a1, a2
+; RV64I-NEXT: andi a1, a1, 255
+; RV64I-NEXT: subw a1, a0, a1
+; RV64I-NEXT: sltu a0, a0, a1
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IZbb-LABEL: func8:
+; RV64IZbb: # %bb.0:
+; RV64IZbb-NEXT: andi a0, a0, 255
+; RV64IZbb-NEXT: mul a1, a1, a2
+; RV64IZbb-NEXT: andi a1, a1, 255
+; RV64IZbb-NEXT: maxu a0, a0, a1
+; RV64IZbb-NEXT: sub a0, a0, a1
+; RV64IZbb-NEXT: ret
+ %a = mul i8 %y, %z
+ %tmp = call i8 @llvm.usub.sat.i8(i8 %x, i8 %a)
+ ret i8 %tmp
+}
+
+define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
+; RV64I-LABEL: func4:
+; RV64I: # %bb.0:
+; RV64I-NEXT: andi a0, a0, 15
+; RV64I-NEXT: mul a1, a1, a2
+; RV64I-NEXT: andi a1, a1, 15
+; RV64I-NEXT: subw a1, a0, a1
+; RV64I-NEXT: sltu a0, a0, a1
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IZbb-LABEL: func4:
+; RV64IZbb: # %bb.0:
+; RV64IZbb-NEXT: andi a0, a0, 15
+; RV64IZbb-NEXT: mul a1, a1, a2
+; RV64IZbb-NEXT: andi a1, a1, 15
+; RV64IZbb-NEXT: maxu a0, a0, a1
+; RV64IZbb-NEXT: sub a0, a0, a1
+; RV64IZbb-NEXT: ret
+ %a = mul i4 %y, %z
+ %tmp = call i4 @llvm.usub.sat.i4(i4 %x, i4 %a)
+ ret i4 %tmp
+}