[llvm] 794551d - [RISCV][llvm] Support PSRA, PSRAI, PSRL, PSRLI codegen for P extension (#171460)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Dec 10 23:58:32 PST 2025
Author: Brandon Wu
Date: 2025-12-11T15:58:27+08:00
New Revision: 794551dbffc0910c5b56660f663daacffa1bcdaa
URL: https://github.com/llvm/llvm-project/commit/794551dbffc0910c5b56660f663daacffa1bcdaa
DIFF: https://github.com/llvm/llvm-project/commit/794551dbffc0910c5b56660f663daacffa1bcdaa.diff
LOG: [RISCV][llvm] Support PSRA, PSRAI, PSRL, PSRLI codegen for P extension (#171460)
Added:
Modified:
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/lib/Target/RISCV/RISCVInstrInfoP.td
llvm/test/CodeGen/RISCV/rvp-ext-rv32.ll
llvm/test/CodeGen/RISCV/rvp-ext-rv64.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index a9819c65c2170..2c0a02ae396c7 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -526,7 +526,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction({ISD::AVGFLOORS, ISD::AVGFLOORU}, VTs, Legal);
setOperationAction({ISD::ABDS, ISD::ABDU}, VTs, Legal);
setOperationAction(ISD::SPLAT_VECTOR, VTs, Legal);
- setOperationAction(ISD::SHL, VTs, Custom);
+ setOperationAction({ISD::SHL, ISD::SRL, ISD::SRA}, VTs, Custom);
setOperationAction(ISD::BITCAST, VTs, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VTs, Custom);
}
@@ -8662,22 +8662,21 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
case ISD::VSELECT:
return lowerToScalableOp(Op, DAG);
case ISD::SHL:
- if (Subtarget.enablePExtCodeGen() &&
- Op.getSimpleValueType().isFixedLengthVector()) {
- // We have patterns for scalar/immediate shift amount, so no lowering
- // needed.
- if (Op.getOperand(1)->getOpcode() == ISD::SPLAT_VECTOR)
- return Op;
-
- // There's no vector-vector version of shift instruction in P extension so
- // we need to unroll to scalar computation and pack them back.
- return DAG.UnrollVectorOp(Op.getNode());
- }
- [[fallthrough]];
- case ISD::SRA:
case ISD::SRL:
- if (Op.getSimpleValueType().isFixedLengthVector())
+ case ISD::SRA:
+ if (Op.getSimpleValueType().isFixedLengthVector()) {
+ if (Subtarget.enablePExtCodeGen()) {
+ // We have patterns for scalar/immediate shift amount, so no lowering
+ // needed.
+ if (Op.getOperand(1)->getOpcode() == ISD::SPLAT_VECTOR)
+ return Op;
+
+ // There's no vector-vector version of shift instruction in P extension
+ // so we need to unroll to scalar computation and pack them back.
+ return DAG.UnrollVectorOp(Op.getNode());
+ }
return lowerToScalableOp(Op, DAG);
+ }
// This can be called for an i32 shift amount that needs to be promoted.
assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
"Unexpected custom legalisation");
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoP.td b/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
index 7250a48bfe895..da4a3a6022337 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
@@ -1513,26 +1513,55 @@ let Predicates = [HasStdExtP] in {
def: Pat<(XLenVecI16VT (abds GPR:$rs1, GPR:$rs2)), (PABD_H GPR:$rs1, GPR:$rs2)>;
def: Pat<(XLenVecI16VT (abdu GPR:$rs1, GPR:$rs2)), (PABDU_H GPR:$rs1, GPR:$rs2)>;
- // 8-bit logical shift left patterns
+ // 8-bit logical shift left/right patterns
def: Pat<(XLenVecI8VT (shl GPR:$rs1, (XLenVecI8VT (splat_vector uimm3:$shamt)))),
(PSLLI_B GPR:$rs1, uimm3:$shamt)>;
+ def: Pat<(XLenVecI8VT (srl GPR:$rs1, (XLenVecI8VT (splat_vector uimm3:$shamt)))),
+ (PSRLI_B GPR:$rs1, uimm3:$shamt)>;
- // 16-bit logical shift left patterns
+ // 16-bit logical shift left/right patterns
def: Pat<(XLenVecI16VT (shl GPR:$rs1, (XLenVecI16VT (splat_vector uimm4:$shamt)))),
(PSLLI_H GPR:$rs1, uimm4:$shamt)>;
+ def: Pat<(XLenVecI16VT (srl GPR:$rs1, (XLenVecI16VT (splat_vector uimm4:$shamt)))),
+ (PSRLI_H GPR:$rs1, uimm4:$shamt)>;
+
+ // 8-bit arithmetic shift right patterns
+ def: Pat<(XLenVecI8VT (sra GPR:$rs1, (XLenVecI8VT (splat_vector uimm3:$shamt)))),
+ (PSRAI_B GPR:$rs1, uimm3:$shamt)>;
+
+ // 16-bit arithmetic shift right patterns
+ def: Pat<(XLenVecI16VT (sra GPR:$rs1, (XLenVecI16VT (splat_vector uimm4:$shamt)))),
+ (PSRAI_H GPR:$rs1, uimm4:$shamt)>;
// 16-bit signed saturation shift left patterns
def: Pat<(XLenVecI16VT (sshlsat GPR:$rs1, (XLenVecI16VT (splat_vector uimm4:$shamt)))),
(PSSLAI_H GPR:$rs1, uimm4:$shamt)>;
- // 8-bit logical shift left
+ // 8-bit logical shift left/right
def: Pat<(XLenVecI8VT (shl GPR:$rs1,
(XLenVecI8VT (splat_vector (XLenVT GPR:$rs2))))),
(PSLL_BS GPR:$rs1, GPR:$rs2)>;
- // 16-bit logical shift left
+ def: Pat<(XLenVecI8VT (srl GPR:$rs1,
+ (XLenVecI8VT (splat_vector (XLenVT GPR:$rs2))))),
+ (PSRL_BS GPR:$rs1, GPR:$rs2)>;
+
+  // 8-bit arithmetic shift right
+ def: Pat<(XLenVecI8VT (sra GPR:$rs1,
+ (XLenVecI8VT (splat_vector (XLenVT GPR:$rs2))))),
+ (PSRA_BS GPR:$rs1, GPR:$rs2)>;
+
+ // 16-bit logical shift left/right
def: Pat<(XLenVecI16VT (shl GPR:$rs1,
(XLenVecI16VT (splat_vector (XLenVT GPR:$rs2))))),
(PSLL_HS GPR:$rs1, GPR:$rs2)>;
+ def: Pat<(XLenVecI16VT (srl GPR:$rs1,
+ (XLenVecI16VT (splat_vector (XLenVT GPR:$rs2))))),
+ (PSRL_HS GPR:$rs1, GPR:$rs2)>;
+
+  // 16-bit arithmetic shift right
+ def: Pat<(XLenVecI16VT (sra GPR:$rs1,
+ (XLenVecI16VT (splat_vector (XLenVT GPR:$rs2))))),
+ (PSRA_HS GPR:$rs1, GPR:$rs2)>;
// 8-bit PLI SD node pattern
def: Pat<(XLenVecI8VT (splat_vector simm8_unsigned:$imm8)), (PLI_B simm8_unsigned:$imm8)>;
@@ -1580,16 +1609,28 @@ let Predicates = [HasStdExtP, IsRV64] in {
def: Pat<(v2i32 (riscv_pasub GPR:$rs1, GPR:$rs2)), (PASUB_W GPR:$rs1, GPR:$rs2)>;
def: Pat<(v2i32 (riscv_pasubu GPR:$rs1, GPR:$rs2)), (PASUBU_W GPR:$rs1, GPR:$rs2)>;
- // 32-bit logical shift left
+ // 32-bit logical shift left/right
def: Pat<(v2i32 (shl GPR:$rs1, (v2i32 (splat_vector (XLenVT GPR:$rs2))))),
(PSLL_WS GPR:$rs1, GPR:$rs2)>;
+ def: Pat<(v2i32 (srl GPR:$rs1, (v2i32 (splat_vector (XLenVT GPR:$rs2))))),
+ (PSRL_WS GPR:$rs1, GPR:$rs2)>;
+
+  // 32-bit arithmetic shift right
+ def: Pat<(v2i32 (sra GPR:$rs1, (v2i32 (splat_vector (XLenVT GPR:$rs2))))),
+ (PSRA_WS GPR:$rs1, GPR:$rs2)>;
// splat pattern
def: Pat<(v2i32 (splat_vector (XLenVT GPR:$rs2))), (PADD_WS (XLenVT X0), GPR:$rs2)>;
- // 32-bit logical shift left patterns
+ // 32-bit logical shift left/right patterns
def: Pat<(v2i32 (shl GPR:$rs1, (v2i32 (splat_vector uimm5:$shamt)))),
(PSLLI_W GPR:$rs1, uimm5:$shamt)>;
+ def: Pat<(v2i32 (srl GPR:$rs1, (v2i32 (splat_vector uimm5:$shamt)))),
+ (PSRLI_W GPR:$rs1, uimm5:$shamt)>;
+
+  // 32-bit arithmetic shift right patterns
+ def: Pat<(v2i32 (sra GPR:$rs1, (v2i32 (splat_vector uimm5:$shamt)))),
+ (PSRAI_W GPR:$rs1, uimm5:$shamt)>;
// 32-bit signed saturation shift left patterns
def: Pat<(v2i32 (sshlsat GPR:$rs1, (v2i32 (splat_vector uimm5:$shamt)))),
diff --git a/llvm/test/CodeGen/RISCV/rvp-ext-rv32.ll b/llvm/test/CodeGen/RISCV/rvp-ext-rv32.ll
index cd59aa03597e2..1e1110f0a30b8 100644
--- a/llvm/test/CodeGen/RISCV/rvp-ext-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvp-ext-rv32.ll
@@ -638,6 +638,60 @@ define void @test_psslai_h(ptr %ret_ptr, ptr %a_ptr) {
ret void
}
+; Test logical shift right immediate
+define void @test_psrli_h(ptr %ret_ptr, ptr %a_ptr) {
+; CHECK-LABEL: test_psrli_h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lw a1, 0(a1)
+; CHECK-NEXT: psrli.h a1, a1, 2
+; CHECK-NEXT: sw a1, 0(a0)
+; CHECK-NEXT: ret
+ %a = load <2 x i16>, ptr %a_ptr
+ %res = lshr <2 x i16> %a, splat(i16 2)
+ store <2 x i16> %res, ptr %ret_ptr
+ ret void
+}
+
+define void @test_psrli_b(ptr %ret_ptr, ptr %a_ptr) {
+; CHECK-LABEL: test_psrli_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lw a1, 0(a1)
+; CHECK-NEXT: psrli.b a1, a1, 2
+; CHECK-NEXT: sw a1, 0(a0)
+; CHECK-NEXT: ret
+ %a = load <4 x i8>, ptr %a_ptr
+ %res = lshr <4 x i8> %a, splat(i8 2)
+ store <4 x i8> %res, ptr %ret_ptr
+ ret void
+}
+
+; Test arithmetic shift right immediate
+define void @test_psrai_h(ptr %ret_ptr, ptr %a_ptr) {
+; CHECK-LABEL: test_psrai_h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lw a1, 0(a1)
+; CHECK-NEXT: psrai.h a1, a1, 2
+; CHECK-NEXT: sw a1, 0(a0)
+; CHECK-NEXT: ret
+ %a = load <2 x i16>, ptr %a_ptr
+ %res = ashr <2 x i16> %a, splat(i16 2)
+ store <2 x i16> %res, ptr %ret_ptr
+ ret void
+}
+
+define void @test_psrai_b(ptr %ret_ptr, ptr %a_ptr) {
+; CHECK-LABEL: test_psrai_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lw a1, 0(a1)
+; CHECK-NEXT: psrai.b a1, a1, 2
+; CHECK-NEXT: sw a1, 0(a0)
+; CHECK-NEXT: ret
+ %a = load <4 x i8>, ptr %a_ptr
+ %res = ashr <4 x i8> %a, splat(i8 2)
+ store <4 x i8> %res, ptr %ret_ptr
+ ret void
+}
+
; Test logical shift left(scalar shamt)
define void @test_psll_hs(ptr %ret_ptr, ptr %a_ptr, i16 %shamt) {
; CHECK-LABEL: test_psll_hs:
@@ -746,3 +800,243 @@ define void @test_psll_bs_vec_shamt(ptr %ret_ptr, ptr %a_ptr, ptr %shamt_ptr) {
store <4 x i8> %res, ptr %ret_ptr
ret void
}
+
+; Test logical shift right(scalar shamt)
+define void @test_psrl_hs(ptr %ret_ptr, ptr %a_ptr, i16 %shamt) {
+; CHECK-LABEL: test_psrl_hs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lw a1, 0(a1)
+; CHECK-NEXT: psrl.hs a1, a1, a2
+; CHECK-NEXT: sw a1, 0(a0)
+; CHECK-NEXT: ret
+ %a = load <2 x i16>, ptr %a_ptr
+ %insert = insertelement <2 x i16> poison, i16 %shamt, i32 0
+ %b = shufflevector <2 x i16> %insert, <2 x i16> poison, <2 x i32> zeroinitializer
+ %res = lshr <2 x i16> %a, %b
+ store <2 x i16> %res, ptr %ret_ptr
+ ret void
+}
+
+define void @test_psrl_bs(ptr %ret_ptr, ptr %a_ptr, i8 %shamt) {
+; CHECK-LABEL: test_psrl_bs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lw a1, 0(a1)
+; CHECK-NEXT: psrl.bs a1, a1, a2
+; CHECK-NEXT: sw a1, 0(a0)
+; CHECK-NEXT: ret
+ %a = load <4 x i8>, ptr %a_ptr
+ %insert = insertelement <4 x i8> poison, i8 %shamt, i32 0
+ %b = shufflevector <4 x i8> %insert, <4 x i8> poison, <4 x i32> zeroinitializer
+ %res = lshr <4 x i8> %a, %b
+ store <4 x i8> %res, ptr %ret_ptr
+ ret void
+}
+
+; Test arithmetic shift right(scalar shamt)
+define void @test_psra_hs(ptr %ret_ptr, ptr %a_ptr, i16 %shamt) {
+; CHECK-LABEL: test_psra_hs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lw a1, 0(a1)
+; CHECK-NEXT: psra.hs a1, a1, a2
+; CHECK-NEXT: sw a1, 0(a0)
+; CHECK-NEXT: ret
+ %a = load <2 x i16>, ptr %a_ptr
+ %insert = insertelement <2 x i16> poison, i16 %shamt, i32 0
+ %b = shufflevector <2 x i16> %insert, <2 x i16> poison, <2 x i32> zeroinitializer
+ %res = ashr <2 x i16> %a, %b
+ store <2 x i16> %res, ptr %ret_ptr
+ ret void
+}
+
+define void @test_psra_bs(ptr %ret_ptr, ptr %a_ptr, i8 %shamt) {
+; CHECK-LABEL: test_psra_bs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lw a1, 0(a1)
+; CHECK-NEXT: psra.bs a1, a1, a2
+; CHECK-NEXT: sw a1, 0(a0)
+; CHECK-NEXT: ret
+ %a = load <4 x i8>, ptr %a_ptr
+ %insert = insertelement <4 x i8> poison, i8 %shamt, i32 0
+ %b = shufflevector <4 x i8> %insert, <4 x i8> poison, <4 x i32> zeroinitializer
+ %res = ashr <4 x i8> %a, %b
+ store <4 x i8> %res, ptr %ret_ptr
+ ret void
+}
+
+; Test logical shift right(vector shamt)
+define void @test_psrl_hs_vec_shamt(ptr %ret_ptr, ptr %a_ptr, ptr %shamt_ptr) {
+; CHECK-RV32-LABEL: test_psrl_hs_vec_shamt:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: lw a2, 0(a2)
+; CHECK-RV32-NEXT: lw a1, 0(a1)
+; CHECK-RV32-NEXT: srli a3, a2, 16
+; CHECK-RV32-NEXT: srli a4, a1, 16
+; CHECK-RV32-NEXT: slli a1, a1, 16
+; CHECK-RV32-NEXT: srl a3, a4, a3
+; CHECK-RV32-NEXT: srli a1, a1, 16
+; CHECK-RV32-NEXT: srl a1, a1, a2
+; CHECK-RV32-NEXT: pack a1, a1, a3
+; CHECK-RV32-NEXT: sw a1, 0(a0)
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: test_psrl_hs_vec_shamt:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: lw a2, 0(a2)
+; CHECK-RV64-NEXT: lw a1, 0(a1)
+; CHECK-RV64-NEXT: srli a3, a2, 16
+; CHECK-RV64-NEXT: srliw a4, a1, 16
+; CHECK-RV64-NEXT: slli a1, a1, 48
+; CHECK-RV64-NEXT: srl a3, a4, a3
+; CHECK-RV64-NEXT: srli a1, a1, 48
+; CHECK-RV64-NEXT: srl a1, a1, a2
+; CHECK-RV64-NEXT: ppaire.h a1, a1, a3
+; CHECK-RV64-NEXT: sw a1, 0(a0)
+; CHECK-RV64-NEXT: ret
+ %a = load <2 x i16>, ptr %a_ptr
+ %b = load <2 x i16>, ptr %shamt_ptr
+ %res = lshr <2 x i16> %a, %b
+ store <2 x i16> %res, ptr %ret_ptr
+ ret void
+}
+
+define void @test_psrl_bs_vec_shamt(ptr %ret_ptr, ptr %a_ptr, ptr %shamt_ptr) {
+; CHECK-RV32-LABEL: test_psrl_bs_vec_shamt:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: lw a2, 0(a2)
+; CHECK-RV32-NEXT: lw a1, 0(a1)
+; CHECK-RV32-NEXT: srli a3, a2, 24
+; CHECK-RV32-NEXT: srli a4, a1, 24
+; CHECK-RV32-NEXT: srli a5, a2, 8
+; CHECK-RV32-NEXT: slli a6, a1, 16
+; CHECK-RV32-NEXT: srl a7, a4, a3
+; CHECK-RV32-NEXT: srli a3, a6, 24
+; CHECK-RV32-NEXT: srl a6, a3, a5
+; CHECK-RV32-NEXT: zext.b a3, a1
+; CHECK-RV32-NEXT: srli a4, a2, 16
+; CHECK-RV32-NEXT: slli a1, a1, 8
+; CHECK-RV32-NEXT: srl a2, a3, a2
+; CHECK-RV32-NEXT: srli a1, a1, 24
+; CHECK-RV32-NEXT: srl a3, a1, a4
+; CHECK-RV32-NEXT: ppaire.db a2, a2, a6
+; CHECK-RV32-NEXT: pack a1, a2, a3
+; CHECK-RV32-NEXT: sw a1, 0(a0)
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: test_psrl_bs_vec_shamt:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: lw a2, 0(a2)
+; CHECK-RV64-NEXT: lw a1, 0(a1)
+; CHECK-RV64-NEXT: srli a3, a2, 24
+; CHECK-RV64-NEXT: srliw a4, a1, 24
+; CHECK-RV64-NEXT: srli a5, a2, 16
+; CHECK-RV64-NEXT: srl a3, a4, a3
+; CHECK-RV64-NEXT: slli a4, a1, 40
+; CHECK-RV64-NEXT: srli a4, a4, 56
+; CHECK-RV64-NEXT: srl a4, a4, a5
+; CHECK-RV64-NEXT: zext.b a5, a1
+; CHECK-RV64-NEXT: srl a5, a5, a2
+; CHECK-RV64-NEXT: srli a2, a2, 8
+; CHECK-RV64-NEXT: slli a1, a1, 48
+; CHECK-RV64-NEXT: srli a1, a1, 56
+; CHECK-RV64-NEXT: srl a1, a1, a2
+; CHECK-RV64-NEXT: ppaire.b a2, a4, a3
+; CHECK-RV64-NEXT: ppaire.b a1, a5, a1
+; CHECK-RV64-NEXT: ppaire.h a1, a1, a2
+; CHECK-RV64-NEXT: sw a1, 0(a0)
+; CHECK-RV64-NEXT: ret
+ %a = load <4 x i8>, ptr %a_ptr
+ %b = load <4 x i8>, ptr %shamt_ptr
+ %res = lshr <4 x i8> %a, %b
+ store <4 x i8> %res, ptr %ret_ptr
+ ret void
+}
+
+; Test arithmetic shift right(vector shamt)
+define void @test_psra_hs_vec_shamt(ptr %ret_ptr, ptr %a_ptr, ptr %shamt_ptr) {
+; CHECK-RV32-LABEL: test_psra_hs_vec_shamt:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: lw a2, 0(a2)
+; CHECK-RV32-NEXT: lw a1, 0(a1)
+; CHECK-RV32-NEXT: srli a3, a2, 16
+; CHECK-RV32-NEXT: srai a4, a1, 16
+; CHECK-RV32-NEXT: slli a1, a1, 16
+; CHECK-RV32-NEXT: sra a3, a4, a3
+; CHECK-RV32-NEXT: srai a1, a1, 16
+; CHECK-RV32-NEXT: sra a1, a1, a2
+; CHECK-RV32-NEXT: pack a1, a1, a3
+; CHECK-RV32-NEXT: sw a1, 0(a0)
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: test_psra_hs_vec_shamt:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: lw a2, 0(a2)
+; CHECK-RV64-NEXT: lw a1, 0(a1)
+; CHECK-RV64-NEXT: srli a3, a2, 16
+; CHECK-RV64-NEXT: sraiw a4, a1, 16
+; CHECK-RV64-NEXT: slli a1, a1, 48
+; CHECK-RV64-NEXT: sra a3, a4, a3
+; CHECK-RV64-NEXT: srai a1, a1, 48
+; CHECK-RV64-NEXT: sra a1, a1, a2
+; CHECK-RV64-NEXT: ppaire.h a1, a1, a3
+; CHECK-RV64-NEXT: sw a1, 0(a0)
+; CHECK-RV64-NEXT: ret
+ %a = load <2 x i16>, ptr %a_ptr
+ %b = load <2 x i16>, ptr %shamt_ptr
+ %res = ashr <2 x i16> %a, %b
+ store <2 x i16> %res, ptr %ret_ptr
+ ret void
+}
+
+define void @test_psra_bs_vec_shamt(ptr %ret_ptr, ptr %a_ptr, ptr %shamt_ptr) {
+; CHECK-RV32-LABEL: test_psra_bs_vec_shamt:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: lw a2, 0(a2)
+; CHECK-RV32-NEXT: lw a1, 0(a1)
+; CHECK-RV32-NEXT: srli a3, a2, 24
+; CHECK-RV32-NEXT: srai a4, a1, 24
+; CHECK-RV32-NEXT: srli a5, a2, 8
+; CHECK-RV32-NEXT: slli a6, a1, 16
+; CHECK-RV32-NEXT: sra a7, a4, a3
+; CHECK-RV32-NEXT: srai a3, a6, 24
+; CHECK-RV32-NEXT: sra a6, a3, a5
+; CHECK-RV32-NEXT: srli a3, a2, 16
+; CHECK-RV32-NEXT: slli a4, a1, 8
+; CHECK-RV32-NEXT: slli a1, a1, 24
+; CHECK-RV32-NEXT: srai a4, a4, 24
+; CHECK-RV32-NEXT: sra a3, a4, a3
+; CHECK-RV32-NEXT: srai a1, a1, 24
+; CHECK-RV32-NEXT: sra a2, a1, a2
+; CHECK-RV32-NEXT: ppaire.db a2, a2, a6
+; CHECK-RV32-NEXT: pack a1, a2, a3
+; CHECK-RV32-NEXT: sw a1, 0(a0)
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: test_psra_bs_vec_shamt:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: lw a2, 0(a2)
+; CHECK-RV64-NEXT: lw a1, 0(a1)
+; CHECK-RV64-NEXT: srli a3, a2, 24
+; CHECK-RV64-NEXT: sraiw a4, a1, 24
+; CHECK-RV64-NEXT: srli a5, a2, 16
+; CHECK-RV64-NEXT: slli a6, a1, 40
+; CHECK-RV64-NEXT: sra a3, a4, a3
+; CHECK-RV64-NEXT: srli a4, a2, 8
+; CHECK-RV64-NEXT: srai a6, a6, 56
+; CHECK-RV64-NEXT: sra a5, a6, a5
+; CHECK-RV64-NEXT: slli a6, a1, 48
+; CHECK-RV64-NEXT: srai a6, a6, 56
+; CHECK-RV64-NEXT: sra a4, a6, a4
+; CHECK-RV64-NEXT: slli a1, a1, 56
+; CHECK-RV64-NEXT: srai a1, a1, 56
+; CHECK-RV64-NEXT: sra a1, a1, a2
+; CHECK-RV64-NEXT: ppaire.b a2, a5, a3
+; CHECK-RV64-NEXT: ppaire.b a1, a1, a4
+; CHECK-RV64-NEXT: ppaire.h a1, a1, a2
+; CHECK-RV64-NEXT: sw a1, 0(a0)
+; CHECK-RV64-NEXT: ret
+ %a = load <4 x i8>, ptr %a_ptr
+ %b = load <4 x i8>, ptr %shamt_ptr
+ %res = ashr <4 x i8> %a, %b
+ store <4 x i8> %res, ptr %ret_ptr
+ ret void
+}
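
The immediate and scalar-shamt tests above are selected directly by the new PSRLI/PSRAI and PSRL/PSRA patterns, while the vector-shamt tests fall back to the unrolled per-lane shifts and pack/ppaire repacking shown in their CHECK lines. As a rough illustration of the lane-wise semantics the patterns rely on, here is a standalone C++ model of psrai.h on RV32 (an illustrative assumption, not text from the spec), where the 32-bit GPR holds two 16-bit lanes that are each arithmetically shifted right by the same immediate:

#include <cstdint>
#include <cstdio>

// Illustrative model of the lane-wise behaviour the PSRAI_H pattern relies
// on for RV32: each 16-bit element packed in the 32-bit register is shifted
// right arithmetically by the same immediate.
static uint32_t psraiHModel(uint32_t Rs1, unsigned Shamt) {
  int16_t Lo = static_cast<int16_t>(Rs1 & 0xFFFFu);
  int16_t Hi = static_cast<int16_t>(Rs1 >> 16);
  // Right-shifting a negative value propagates the sign bit on the usual
  // two's-complement targets (guaranteed arithmetic since C++20).
  uint16_t LoRes = static_cast<uint16_t>(Lo >> Shamt);
  uint16_t HiRes = static_cast<uint16_t>(Hi >> Shamt);
  return (static_cast<uint32_t>(HiRes) << 16) | LoRes;
}

int main() {
  // <2 x i16> {0x8000, 0x0040} ashr splat(2) -> {0xe000, 0x0010}
  std::printf("%08x\n", psraiHModel(0x00408000u, 2)); // prints 0010e000
  return 0;
}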
diff --git a/llvm/test/CodeGen/RISCV/rvp-ext-rv64.ll b/llvm/test/CodeGen/RISCV/rvp-ext-rv64.ll
index c7fb891cdd996..3e0f431d67f41 100644
--- a/llvm/test/CodeGen/RISCV/rvp-ext-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvp-ext-rv64.ll
@@ -791,6 +791,86 @@ define void @test_pslli_w(ptr %ret_ptr, ptr %a_ptr) {
store <2 x i32> %res, ptr %ret_ptr
ret void
}
+; Test logical shift right immediate
+define void @test_psrli_w(ptr %ret_ptr, ptr %a_ptr) {
+; CHECK-LABEL: test_psrli_w:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld a1, 0(a1)
+; CHECK-NEXT: psrli.w a1, a1, 2
+; CHECK-NEXT: sd a1, 0(a0)
+; CHECK-NEXT: ret
+ %a = load <2 x i32>, ptr %a_ptr
+ %res = lshr <2 x i32> %a, splat(i32 2)
+ store <2 x i32> %res, ptr %ret_ptr
+ ret void
+}
+
+define void @test_psrli_h(ptr %ret_ptr, ptr %a_ptr) {
+; CHECK-LABEL: test_psrli_h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld a1, 0(a1)
+; CHECK-NEXT: psrli.h a1, a1, 2
+; CHECK-NEXT: sd a1, 0(a0)
+; CHECK-NEXT: ret
+ %a = load <4 x i16>, ptr %a_ptr
+ %res = lshr <4 x i16> %a, splat(i16 2)
+ store <4 x i16> %res, ptr %ret_ptr
+ ret void
+}
+
+define void @test_psrli_b(ptr %ret_ptr, ptr %a_ptr) {
+; CHECK-LABEL: test_psrli_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld a1, 0(a1)
+; CHECK-NEXT: psrli.b a1, a1, 2
+; CHECK-NEXT: sd a1, 0(a0)
+; CHECK-NEXT: ret
+ %a = load <8 x i8>, ptr %a_ptr
+ %res = lshr <8 x i8> %a, splat(i8 2)
+ store <8 x i8> %res, ptr %ret_ptr
+ ret void
+}
+
+; Test arithmetic shift right immediate
+define void @test_psrai_w(ptr %ret_ptr, ptr %a_ptr) {
+; CHECK-LABEL: test_psrai_w:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld a1, 0(a1)
+; CHECK-NEXT: psrai.w a1, a1, 2
+; CHECK-NEXT: sd a1, 0(a0)
+; CHECK-NEXT: ret
+ %a = load <2 x i32>, ptr %a_ptr
+ %res = ashr <2 x i32> %a, splat(i32 2)
+ store <2 x i32> %res, ptr %ret_ptr
+ ret void
+}
+
+define void @test_psrai_h(ptr %ret_ptr, ptr %a_ptr) {
+; CHECK-LABEL: test_psrai_h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld a1, 0(a1)
+; CHECK-NEXT: psrai.h a1, a1, 2
+; CHECK-NEXT: sd a1, 0(a0)
+; CHECK-NEXT: ret
+ %a = load <4 x i16>, ptr %a_ptr
+ %res = ashr <4 x i16> %a, splat(i16 2)
+ store <4 x i16> %res, ptr %ret_ptr
+ ret void
+}
+
+define void @test_psrai_b(ptr %ret_ptr, ptr %a_ptr) {
+; CHECK-LABEL: test_psrai_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld a1, 0(a1)
+; CHECK-NEXT: psrai.b a1, a1, 2
+; CHECK-NEXT: sd a1, 0(a0)
+; CHECK-NEXT: ret
+ %a = load <8 x i8>, ptr %a_ptr
+ %res = ashr <8 x i8> %a, splat(i8 2)
+ store <8 x i8> %res, ptr %ret_ptr
+ ret void
+}
+
; Test arithmetic saturation shift left immediate for v2i32
define void @test_psslai_w(ptr %ret_ptr, ptr %a_ptr) {
@@ -841,3 +921,75 @@ define void @test_psll_ws_vec_shamt(ptr %ret_ptr, ptr %a_ptr, ptr %shamt_ptr) {
store <2 x i32> %res, ptr %ret_ptr
ret void
}
+
+; Test logical shift right(scalar shamt)
+define void @test_psrl_ws(ptr %ret_ptr, ptr %a_ptr, i32 %shamt) {
+; CHECK-LABEL: test_psrl_ws:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld a1, 0(a1)
+; CHECK-NEXT: psrl.ws a1, a1, a2
+; CHECK-NEXT: sd a1, 0(a0)
+; CHECK-NEXT: ret
+ %a = load <2 x i32>, ptr %a_ptr
+ %insert = insertelement <2 x i32> poison, i32 %shamt, i32 0
+ %b = shufflevector <2 x i32> %insert, <2 x i32> poison, <2 x i32> zeroinitializer
+ %res = lshr <2 x i32> %a, %b
+ store <2 x i32> %res, ptr %ret_ptr
+ ret void
+}
+
+; Test arithmetic shift right(scalar shamt)
+define void @test_psra_ws(ptr %ret_ptr, ptr %a_ptr, i32 %shamt) {
+; CHECK-LABEL: test_psra_ws:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld a1, 0(a1)
+; CHECK-NEXT: psra.ws a1, a1, a2
+; CHECK-NEXT: sd a1, 0(a0)
+; CHECK-NEXT: ret
+ %a = load <2 x i32>, ptr %a_ptr
+ %insert = insertelement <2 x i32> poison, i32 %shamt, i32 0
+ %b = shufflevector <2 x i32> %insert, <2 x i32> poison, <2 x i32> zeroinitializer
+ %res = ashr <2 x i32> %a, %b
+ store <2 x i32> %res, ptr %ret_ptr
+ ret void
+}
+
+; Test logical shift right(vector shamt)
+define void @test_psrl_ws_vec_shamt(ptr %ret_ptr, ptr %a_ptr, ptr %shamt_ptr) {
+; CHECK-LABEL: test_psrl_ws_vec_shamt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld a1, 0(a1)
+; CHECK-NEXT: ld a2, 0(a2)
+; CHECK-NEXT: srlw a3, a1, a2
+; CHECK-NEXT: srli a2, a2, 32
+; CHECK-NEXT: srli a1, a1, 32
+; CHECK-NEXT: srlw a1, a1, a2
+; CHECK-NEXT: pack a1, a3, a1
+; CHECK-NEXT: sd a1, 0(a0)
+; CHECK-NEXT: ret
+ %a = load <2 x i32>, ptr %a_ptr
+ %b = load <2 x i32>, ptr %shamt_ptr
+ %res = lshr <2 x i32> %a, %b
+ store <2 x i32> %res, ptr %ret_ptr
+ ret void
+}
+
+; Test arithmetic shift right(vector shamt)
+define void @test_psra_ws_vec_shamt(ptr %ret_ptr, ptr %a_ptr, ptr %shamt_ptr) {
+; CHECK-LABEL: test_psra_ws_vec_shamt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld a1, 0(a1)
+; CHECK-NEXT: ld a2, 0(a2)
+; CHECK-NEXT: sraw a3, a1, a2
+; CHECK-NEXT: srli a2, a2, 32
+; CHECK-NEXT: srli a1, a1, 32
+; CHECK-NEXT: sraw a1, a1, a2
+; CHECK-NEXT: pack a1, a3, a1
+; CHECK-NEXT: sd a1, 0(a0)
+; CHECK-NEXT: ret
+ %a = load <2 x i32>, ptr %a_ptr
+ %b = load <2 x i32>, ptr %shamt_ptr
+ %res = ashr <2 x i32> %a, %b
+ store <2 x i32> %res, ptr %ret_ptr
+ ret void
+}