[llvm] [RISCV][SelectionDAG] Add an ISD::CTLS node for counting leading redundant sign bits. Use it to select CLS(W). (PR #173417)
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Mon Dec 29 10:13:30 PST 2025
https://github.com/topperc updated https://github.com/llvm/llvm-project/pull/173417
>From c5d9984c13f7b0acb19314446a51c4e09dad6c51 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Tue, 23 Dec 2025 10:53:30 -0800
Subject: [PATCH 1/2] Pre-commit tests
---
llvm/test/CodeGen/RISCV/rv32p.ll | 76 +++++++++++++++++++++++++++++++-
llvm/test/CodeGen/RISCV/rv64p.ll | 66 ++++++++++++++++++++++++++-
2 files changed, 140 insertions(+), 2 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rv32p.ll b/llvm/test/CodeGen/RISCV/rv32p.ll
index 9e3cbeb787da0..c8a060c3018ac 100644
--- a/llvm/test/CodeGen/RISCV/rv32p.ll
+++ b/llvm/test/CodeGen/RISCV/rv32p.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-p -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-p,+zbb -verify-machineinstrs < %s \
; RUN: | FileCheck %s
define i32 @abs_i32(i32 %x) {
@@ -97,3 +97,77 @@ define i32 @pack_i32_3(i16 zeroext %0, i16 zeroext %1, i32 %2) {
%8 = add i32 %7, %2
ret i32 %8
}
+
+define i8 @cls_i8(i8 %x) {
+; CHECK-LABEL: cls_i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: sext.b a1, a0
+; CHECK-NEXT: srli a1, a1, 7
+; CHECK-NEXT: xor a0, a0, a1
+; CHECK-NEXT: zext.b a0, a0
+; CHECK-NEXT: clz a0, a0
+; CHECK-NEXT: addi a0, a0, -25
+; CHECK-NEXT: ret
+ %a = ashr i8 %x, 7
+ %b = xor i8 %x, %a
+ %c = call i8 @llvm.ctlz.i8(i8 %b, i1 false)
+ %d = sub i8 %c, 1
+ ret i8 %d
+}
+
+define i16 @cls_i16(i16 %x) {
+; CHECK-LABEL: cls_i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: sext.h a1, a0
+; CHECK-NEXT: srli a1, a1, 15
+; CHECK-NEXT: xor a0, a0, a1
+; CHECK-NEXT: zext.h a0, a0
+; CHECK-NEXT: clz a0, a0
+; CHECK-NEXT: addi a0, a0, -17
+; CHECK-NEXT: ret
+ %a = ashr i16 %x, 15
+ %b = xor i16 %x, %a
+ %c = call i16 @llvm.ctlz.i16(i16 %b, i1 false)
+ %d = sub i16 %c, 1
+ ret i16 %d
+}
+
+define i32 @cls_i32(i32 %x) {
+; CHECK-LABEL: cls_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: srai a1, a0, 31
+; CHECK-NEXT: xor a0, a0, a1
+; CHECK-NEXT: clz a0, a0
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: ret
+ %a = ashr i32 %x, 31
+ %b = xor i32 %x, %a
+ %c = call i32 @llvm.ctlz.i32(i32 %b, i1 false)
+ %d = sub i32 %c, 1
+ ret i32 %d
+}
+
+define i64 @cls_i64(i64 %x) {
+; CHECK-LABEL: cls_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: srai a2, a1, 31
+; CHECK-NEXT: bne a1, a2, .LBB12_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: xor a0, a0, a2
+; CHECK-NEXT: clz a0, a0
+; CHECK-NEXT: addi a1, a0, 32
+; CHECK-NEXT: j .LBB12_3
+; CHECK-NEXT: .LBB12_2:
+; CHECK-NEXT: xor a1, a1, a2
+; CHECK-NEXT: clz a1, a1
+; CHECK-NEXT: .LBB12_3:
+; CHECK-NEXT: addi a0, a1, -1
+; CHECK-NEXT: snez a1, a1
+; CHECK-NEXT: addi a1, a1, -1
+; CHECK-NEXT: ret
+ %a = ashr i64 %x, 63
+ %b = xor i64 %x, %a
+ %c = call i64 @llvm.ctlz.i64(i64 %b, i1 false)
+ %d = sub i64 %c, 1
+ ret i64 %d
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64p.ll b/llvm/test/CodeGen/RISCV/rv64p.ll
index b778b1abde461..5ad7d0392bdb7 100644
--- a/llvm/test/CodeGen/RISCV/rv64p.ll
+++ b/llvm/test/CodeGen/RISCV/rv64p.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-p -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-p,+zbb -verify-machineinstrs < %s \
; RUN: | FileCheck %s
define i32 @abs_i32(i32 %x) {
@@ -138,3 +138,67 @@ define i64 @pack_i64_3(ptr %0, ptr %1) {
%8 = or i64 %5, %7
ret i64 %8
}
+
+define i8 @cls_i8(i8 %x) {
+; CHECK-LABEL: cls_i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: sext.b a1, a0
+; CHECK-NEXT: srli a1, a1, 7
+; CHECK-NEXT: xor a0, a0, a1
+; CHECK-NEXT: zext.b a0, a0
+; CHECK-NEXT: clz a0, a0
+; CHECK-NEXT: addi a0, a0, -57
+; CHECK-NEXT: ret
+ %a = ashr i8 %x, 7
+ %b = xor i8 %x, %a
+ %c = call i8 @llvm.ctlz.i8(i8 %b, i1 false)
+ %d = sub i8 %c, 1
+ ret i8 %d
+}
+
+define i16 @cls_i16(i16 %x) {
+; CHECK-LABEL: cls_i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: sext.h a1, a0
+; CHECK-NEXT: srli a1, a1, 15
+; CHECK-NEXT: xor a0, a0, a1
+; CHECK-NEXT: zext.h a0, a0
+; CHECK-NEXT: clz a0, a0
+; CHECK-NEXT: addi a0, a0, -49
+; CHECK-NEXT: ret
+ %a = ashr i16 %x, 15
+ %b = xor i16 %x, %a
+ %c = call i16 @llvm.ctlz.i16(i16 %b, i1 false)
+ %d = sub i16 %c, 1
+ ret i16 %d
+}
+
+define i32 @cls_i32(i32 %x) {
+; CHECK-LABEL: cls_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: sraiw a1, a0, 31
+; CHECK-NEXT: xor a0, a0, a1
+; CHECK-NEXT: clzw a0, a0
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: ret
+ %a = ashr i32 %x, 31
+ %b = xor i32 %x, %a
+ %c = call i32 @llvm.ctlz.i32(i32 %b, i1 false)
+ %d = sub i32 %c, 1
+ ret i32 %d
+}
+
+define i64 @cls_i64(i64 %x) {
+; CHECK-LABEL: cls_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: srai a1, a0, 63
+; CHECK-NEXT: xor a0, a0, a1
+; CHECK-NEXT: clz a0, a0
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: ret
+ %a = ashr i64 %x, 63
+ %b = xor i64 %x, %a
+ %c = call i64 @llvm.ctlz.i64(i64 %b, i1 false)
+ %d = sub i64 %c, 1
+ ret i64 %d
+}
>From 6f4a590ae2773cfc9aea21115b300e9075a883b1 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Tue, 23 Dec 2025 11:05:19 -0800
Subject: [PATCH 2/2] [RISCV][SelectionDAG] Add an ISD::CTLS node for counting
 leading redundant sign bits. Use it to select CLS(W).
The P extension adds an instruction equivalent to __builtin_clrsb. I
believe AArch64 has a similar instruction that we currently fail to
select for the builtin.
This patch adds a combine based on the canonical version of the pattern
emitted by clang for the builtin, (add (ctlz (xor x, (sra x, bw-1))), -1).
I'm starting the combine at the ctlz because the outer add can easily be
combined into other nodes, obscuring the full pattern. So we generate
(add (ctls x), 1) and hope the add will be combined away.
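To make the identity concrete, here is a small self-contained C sketch
(illustrative only, not part of the patch; the clrsb32 helper name is made
up for the example) checking that the xor+ctlz form computes
__builtin_clrsb:

  #include <assert.h>
  #include <stdint.h>

  /* clrsb: number of leading bits that are copies of the sign bit, not
     counting the sign bit itself.  For i32 this is ctlz(x ^ (x >> 31)) - 1,
     using the zero-defined ctlz (ctlz(0) == 32), i.e. ISD::CTLZ rather than
     CTLZ_ZERO_UNDEF.  Assumes arithmetic right shift of negative ints, as
     GCC/Clang provide. */
  static int clrsb32(int32_t x) {
    uint32_t y = (uint32_t)(x ^ (x >> 31)); /* negatives fold onto non-negatives */
    int lz = y ? __builtin_clz(y) : 32;     /* __builtin_clz(0) is undefined in C */
    return lz - 1;
  }

  int main(void) {
    assert(clrsb32(0) == 31 && clrsb32(-1) == 31);
    assert(clrsb32(1) == 30 && clrsb32(-2) == 30);
    assert(clrsb32(0x40000000) == 0); /* bit 30 already differs from the sign bit */
    assert(clrsb32(123456) == __builtin_clrsb(123456));
    assert(clrsb32(-123456) == __builtin_clrsb(-123456));
    return 0;
  }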
I've only enabled the combine when the target has a Legal or Custom
action for the operation, taking into account type promotion. We
can relax this in the future by adding a default expansion to LegalizeDAG
and adding more type legalization rules.
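As a worked example of the interaction with type promotion (numbers taken
from the rv32 cls_i8 test below), a sign extension from i8 to i32 adds
32 - 8 = 24 redundant sign bits, so the promoted result is adjusted:

  ctls.i8(x)
    = ctls.i32(sext(x)) - (32 - 8)
    = ctls.i32(sext(x)) - 24

The +1 introduced by the combine cancels the -1 already present in the IR,
so cls_i8 lowers to just sext.b / cls / addi a0, a0, -24.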
---
llvm/include/llvm/CodeGen/ISDOpcodes.h | 4 +++
.../include/llvm/Target/TargetSelectionDAG.td | 1 +
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 26 +++++++++++++++++
.../SelectionDAG/LegalizeIntegerTypes.cpp | 14 ++++++++++
llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h | 1 +
.../SelectionDAG/SelectionDAGDumper.cpp | 1 +
llvm/lib/CodeGen/TargetLoweringBase.cpp | 1 +
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 27 +++++++++++++++---
llvm/lib/Target/RISCV/RISCVInstrInfoP.td | 3 ++
llvm/test/CodeGen/RISCV/rv32p.ll | 27 ++++++------------
llvm/test/CodeGen/RISCV/rv64p.ll | 28 ++++++-------------
11 files changed, 91 insertions(+), 42 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index 27a9019a258f8..c16a1018e118f 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -781,6 +781,10 @@ enum NodeType {
CTTZ_ZERO_UNDEF,
CTLZ_ZERO_UNDEF,
+ /// Count leading redundant sign bits. Equivalent to
+ /// (sub (ctlz (x < 0 ? ~x : x)), 1).
+ CTLS,
+
/// Select(COND, TRUEVAL, FALSEVAL). If the type of the boolean COND is not
/// i1 then the high bits must conform to getBooleanContents.
SELECT,
diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td
index 994bb61697617..46cbde939e58c 100644
--- a/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -500,6 +500,7 @@ def bswap : SDNode<"ISD::BSWAP" , SDTIntUnaryOp>;
def ctlz : SDNode<"ISD::CTLZ" , SDTIntBitCountUnaryOp>;
def cttz : SDNode<"ISD::CTTZ" , SDTIntBitCountUnaryOp>;
def ctpop : SDNode<"ISD::CTPOP" , SDTIntBitCountUnaryOp>;
+def ctls : SDNode<"ISD::CTLS" , SDTIntBitCountUnaryOp>;
def ctlz_zero_undef : SDNode<"ISD::CTLZ_ZERO_UNDEF", SDTIntBitCountUnaryOp>;
def cttz_zero_undef : SDNode<"ISD::CTTZ_ZERO_UNDEF", SDTIntBitCountUnaryOp>;
def sext : SDNode<"ISD::SIGN_EXTEND", SDTIntExtendOp>;
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 99af0aeae7d73..1b90bcd8b1a20 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -633,6 +633,8 @@ namespace {
SDValue foldAddToAvg(SDNode *N, const SDLoc &DL);
SDValue foldSubToAvg(SDNode *N, const SDLoc &DL);
+ SDValue foldCTLZToCTLS(SDValue Src, const SDLoc &DL);
+
SDValue SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
unsigned HiOp);
SDValue CombineConsecutiveLoads(SDNode *N, EVT VT);
@@ -11821,6 +11823,27 @@ SDValue DAGCombiner::visitBITREVERSE(SDNode *N) {
return SDValue();
}
+// Fold (ctlz (xor x, (sra x, bitwidth-1))) -> (add (ctls x), 1).
+SDValue DAGCombiner::foldCTLZToCTLS(SDValue Src, const SDLoc &DL) {
+ EVT VT = Src.getValueType();
+
+ auto LK = TLI.getTypeConversion(*DAG.getContext(), VT);
+ if (!(LK.first == TargetLoweringBase::TypeLegal ||
+ LK.first == TargetLoweringBase::TypePromoteInteger) ||
+ !TLI.isOperationLegalOrCustom(ISD::CTLS, LK.second))
+ return SDValue();
+
+ unsigned BitWidth = VT.getScalarSizeInBits();
+
+ SDValue X;
+ if (!sd_match(Src, m_Xor(m_Value(X),
+ m_Sra(m_Deferred(X), m_SpecificInt(BitWidth - 1)))))
+ return SDValue();
+
+ SDValue Res = DAG.getNode(ISD::CTLS, DL, VT, X);
+ return DAG.getNode(ISD::ADD, DL, VT, Res, DAG.getConstant(1, DL, VT));
+}
+
SDValue DAGCombiner::visitCTLZ(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
@@ -11835,6 +11858,9 @@ SDValue DAGCombiner::visitCTLZ(SDNode *N) {
if (DAG.isKnownNeverZero(N0))
return DAG.getNode(ISD::CTLZ_ZERO_UNDEF, DL, VT, N0);
+ if (SDValue V = foldCTLZToCTLS(N0, DL))
+ return V;
+
return SDValue();
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 67c4eccec962c..433ea2421dfdb 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -70,6 +70,7 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::VP_CTLZ:
case ISD::CTLZ_ZERO_UNDEF:
case ISD::CTLZ: Res = PromoteIntRes_CTLZ(N); break;
+ case ISD::CTLS: Res = PromoteIntRes_CTLS(N); break;
case ISD::PARITY:
case ISD::VP_CTPOP:
case ISD::CTPOP: Res = PromoteIntRes_CTPOP_PARITY(N); break;
@@ -772,6 +773,19 @@ SDValue DAGTypeLegalizer::PromoteIntRes_CTLZ(SDNode *N) {
llvm_unreachable("Invalid CTLZ Opcode");
}
+SDValue DAGTypeLegalizer::PromoteIntRes_CTLS(SDNode *N) {
+ EVT OVT = N->getValueType(0);
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), OVT);
+ SDLoc dl(N);
+
+ SDValue ExtractLeadingBits = DAG.getConstant(
+ NVT.getScalarSizeInBits() - OVT.getScalarSizeInBits(), dl, NVT);
+
+ SDValue Op = SExtPromotedInteger(N->getOperand(0));
+ return DAG.getNode(ISD::SUB, dl, NVT, DAG.getNode(ISD::CTLS, dl, NVT, Op),
+ ExtractLeadingBits);
+}
+
SDValue DAGTypeLegalizer::PromoteIntRes_CTPOP_PARITY(SDNode *N) {
EVT OVT = N->getValueType(0);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), OVT);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index f2d62a073c17a..cd58c8ab1c3e4 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -326,6 +326,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
SDValue PromoteIntRes_BUILD_PAIR(SDNode *N);
SDValue PromoteIntRes_Constant(SDNode *N);
SDValue PromoteIntRes_CTLZ(SDNode *N);
+ SDValue PromoteIntRes_CTLS(SDNode *N);
SDValue PromoteIntRes_CTPOP_PARITY(SDNode *N);
SDValue PromoteIntRes_CTTZ(SDNode *N);
SDValue PromoteIntRes_VP_CttzElements(SDNode *N);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index ec5edd5f13978..7b24db6cd09d6 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -512,6 +512,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::CTTZ_ZERO_UNDEF: return "cttz_zero_undef";
case ISD::CTLZ: return "ctlz";
case ISD::CTLZ_ZERO_UNDEF: return "ctlz_zero_undef";
+ case ISD::CTLS: return "ctls";
case ISD::PARITY: return "parity";
// Trampolines
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index c4b054fd79f1b..16a5dcbe040a0 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -1125,6 +1125,7 @@ void TargetLoweringBase::initActions() {
// These default to Expand so they will be expanded to CTLZ/CTTZ by default.
setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
Expand);
+ setOperationAction(ISD::CTLS, VT, Expand);
setOperationAction({ISD::BITREVERSE, ISD::PARITY}, VT, Expand);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 7dfb5cd0a9e6c..58951da19103f 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -446,6 +446,12 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::CTLZ, XLenVT, Expand);
}
+ if (Subtarget.hasStdExtP()) {
+ setOperationAction(ISD::CTLS, XLenVT, Legal);
+ if (Subtarget.is64Bit())
+ setOperationAction(ISD::CTLS, MVT::i32, Custom);
+ }
+
if (Subtarget.hasStdExtP() ||
(Subtarget.hasVendorXCValu() && !Subtarget.is64Bit())) {
setOperationAction(ISD::ABS, XLenVT, Legal);
@@ -15042,16 +15048,29 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
case ISD::CTTZ:
case ISD::CTTZ_ZERO_UNDEF:
case ISD::CTLZ:
- case ISD::CTLZ_ZERO_UNDEF: {
+ case ISD::CTLZ_ZERO_UNDEF:
+ case ISD::CTLS: {
assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
"Unexpected custom legalisation");
SDValue NewOp0 =
DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
- bool IsCTZ =
- N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
+ unsigned Opc;
+ switch (N->getOpcode()) {
+ default: llvm_unreachable("Unexpected opcode");
+ case ISD::CTTZ:
+ case ISD::CTTZ_ZERO_UNDEF:
+ Opc = RISCVISD::CTZW;
+ break;
+ case ISD::CTLZ:
+ case ISD::CTLZ_ZERO_UNDEF:
+ Opc = RISCVISD::CLZW;
+ break;
+ case ISD::CTLS:
+ Opc = RISCVISD::CLSW;
+ break;
+ }
- unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
return;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoP.td b/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
index 060161f3a8fa0..4e022f46a8ceb 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
@@ -1462,6 +1462,7 @@ let Predicates = [HasStdExtP, IsRV32] in {
//===----------------------------------------------------------------------===//
def riscv_absw : RVSDNode<"ABSW", SDT_RISCVIntUnaryOpW>;
+def riscv_clsw : RVSDNode<"CLSW", SDT_RISCVIntUnaryOpW>;
def SDT_RISCVPBinOp : SDTypeProfile<1, 2, [SDTCisVec<0>,
SDTCisInt<0>,
@@ -1473,6 +1474,7 @@ def riscv_pmulhsu : RVSDNode<"PMULHSU", SDT_RISCVPBinOp>;
let Predicates = [HasStdExtP] in {
def : PatGpr<abs, ABS>;
+ def : PatGpr<ctls, CLS>;
// Basic 8-bit arithmetic patterns
def: Pat<(XLenVecI8VT (add GPR:$rs1, GPR:$rs2)), (PADD_B GPR:$rs1, GPR:$rs2)>;
@@ -1593,6 +1595,7 @@ let Predicates = [HasStdExtP, IsRV32] in {
let Predicates = [HasStdExtP, IsRV64] in {
def : PatGpr<riscv_absw, ABSW>;
+ def : PatGpr<riscv_clsw, CLSW>;
// 32-bit PLI SD node pattern
def: Pat<(v2i32 (splat_vector simm10:$imm10)), (PLI_W simm10:$imm10)>;
diff --git a/llvm/test/CodeGen/RISCV/rv32p.ll b/llvm/test/CodeGen/RISCV/rv32p.ll
index c8a060c3018ac..12040f72e7f50 100644
--- a/llvm/test/CodeGen/RISCV/rv32p.ll
+++ b/llvm/test/CodeGen/RISCV/rv32p.ll
@@ -101,12 +101,9 @@ define i32 @pack_i32_3(i16 zeroext %0, i16 zeroext %1, i32 %2) {
define i8 @cls_i8(i8 %x) {
; CHECK-LABEL: cls_i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: sext.b a1, a0
-; CHECK-NEXT: srli a1, a1, 7
-; CHECK-NEXT: xor a0, a0, a1
-; CHECK-NEXT: zext.b a0, a0
-; CHECK-NEXT: clz a0, a0
-; CHECK-NEXT: addi a0, a0, -25
+; CHECK-NEXT: sext.b a0, a0
+; CHECK-NEXT: cls a0, a0
+; CHECK-NEXT: addi a0, a0, -24
; CHECK-NEXT: ret
%a = ashr i8 %x, 7
%b = xor i8 %x, %a
@@ -118,12 +115,9 @@ define i8 @cls_i8(i8 %x) {
define i16 @cls_i16(i16 %x) {
; CHECK-LABEL: cls_i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: sext.h a1, a0
-; CHECK-NEXT: srli a1, a1, 15
-; CHECK-NEXT: xor a0, a0, a1
-; CHECK-NEXT: zext.h a0, a0
-; CHECK-NEXT: clz a0, a0
-; CHECK-NEXT: addi a0, a0, -17
+; CHECK-NEXT: sext.h a0, a0
+; CHECK-NEXT: cls a0, a0
+; CHECK-NEXT: addi a0, a0, -16
; CHECK-NEXT: ret
%a = ashr i16 %x, 15
%b = xor i16 %x, %a
@@ -135,10 +129,7 @@ define i16 @cls_i16(i16 %x) {
define i32 @cls_i32(i32 %x) {
; CHECK-LABEL: cls_i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: srai a1, a0, 31
-; CHECK-NEXT: xor a0, a0, a1
-; CHECK-NEXT: clz a0, a0
-; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: cls a0, a0
; CHECK-NEXT: ret
%a = ashr i32 %x, 31
%b = xor i32 %x, %a
@@ -158,8 +149,8 @@ define i64 @cls_i64(i64 %x) {
; CHECK-NEXT: addi a1, a0, 32
; CHECK-NEXT: j .LBB12_3
; CHECK-NEXT: .LBB12_2:
-; CHECK-NEXT: xor a1, a1, a2
-; CHECK-NEXT: clz a1, a1
+; CHECK-NEXT: cls a1, a1
+; CHECK-NEXT: addi a1, a1, 1
; CHECK-NEXT: .LBB12_3:
; CHECK-NEXT: addi a0, a1, -1
; CHECK-NEXT: snez a1, a1
diff --git a/llvm/test/CodeGen/RISCV/rv64p.ll b/llvm/test/CodeGen/RISCV/rv64p.ll
index 5ad7d0392bdb7..4356d2589bda4 100644
--- a/llvm/test/CodeGen/RISCV/rv64p.ll
+++ b/llvm/test/CodeGen/RISCV/rv64p.ll
@@ -142,12 +142,9 @@ define i64 @pack_i64_3(ptr %0, ptr %1) {
define i8 @cls_i8(i8 %x) {
; CHECK-LABEL: cls_i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: sext.b a1, a0
-; CHECK-NEXT: srli a1, a1, 7
-; CHECK-NEXT: xor a0, a0, a1
-; CHECK-NEXT: zext.b a0, a0
-; CHECK-NEXT: clz a0, a0
-; CHECK-NEXT: addi a0, a0, -57
+; CHECK-NEXT: sext.b a0, a0
+; CHECK-NEXT: cls a0, a0
+; CHECK-NEXT: addi a0, a0, -56
; CHECK-NEXT: ret
%a = ashr i8 %x, 7
%b = xor i8 %x, %a
@@ -159,12 +156,9 @@ define i8 @cls_i8(i8 %x) {
define i16 @cls_i16(i16 %x) {
; CHECK-LABEL: cls_i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: sext.h a1, a0
-; CHECK-NEXT: srli a1, a1, 15
-; CHECK-NEXT: xor a0, a0, a1
-; CHECK-NEXT: zext.h a0, a0
-; CHECK-NEXT: clz a0, a0
-; CHECK-NEXT: addi a0, a0, -49
+; CHECK-NEXT: sext.h a0, a0
+; CHECK-NEXT: cls a0, a0
+; CHECK-NEXT: addi a0, a0, -48
; CHECK-NEXT: ret
%a = ashr i16 %x, 15
%b = xor i16 %x, %a
@@ -176,10 +170,7 @@ define i16 @cls_i16(i16 %x) {
define i32 @cls_i32(i32 %x) {
; CHECK-LABEL: cls_i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: sraiw a1, a0, 31
-; CHECK-NEXT: xor a0, a0, a1
-; CHECK-NEXT: clzw a0, a0
-; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: clsw a0, a0
; CHECK-NEXT: ret
%a = ashr i32 %x, 31
%b = xor i32 %x, %a
@@ -191,10 +182,7 @@ define i32 @cls_i32(i32 %x) {
define i64 @cls_i64(i64 %x) {
; CHECK-LABEL: cls_i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: srai a1, a0, 63
-; CHECK-NEXT: xor a0, a0, a1
-; CHECK-NEXT: clz a0, a0
-; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: cls a0, a0
; CHECK-NEXT: ret
%a = ashr i64 %x, 63
%b = xor i64 %x, %a