[llvm] [RISCV] Codegen support for XCVmem extension (PR #76916)
Liao Chunyu via llvm-commits
llvm-commits at lists.llvm.org
Wed Jan 3 23:39:28 PST 2024
https://github.com/ChunyuLiao created https://github.com/llvm/llvm-project/pull/76916
All post-increment load/store and register-register load/store
spec:
https://github.com/openhwgroup/cv32e40p/blob/master/docs/source/instruction_set_extensions.rst
>From 795a445d272449849e540ff7b101c2855955d82f Mon Sep 17 00:00:00 2001
From: Liao Chunyu <chunyu at iscas.ac.cn>
Date: Fri, 29 Dec 2023 07:28:39 -0500
Subject: [PATCH] [RISCV] Codegen support for XCVmem extension
All post-increment load/store and register-register load/store
spec:
https://github.com/openhwgroup/cv32e40p/blob/master/docs/source/instruction_set_extensions.rst
---
.../Target/RISCV/AsmParser/RISCVAsmParser.cpp | 2 +-
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 74 +++++
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h | 2 +
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 30 ++
llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td | 45 ++-
llvm/test/CodeGen/RISCV/xcvmem.ll | 295 ++++++++++++++++++
6 files changed, 446 insertions(+), 2 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/xcvmem.ll
diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index 4759aa951664c7..b56a9a63bae51a 100644
--- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -1210,7 +1210,7 @@ struct RISCVOperand final : public MCParsedAsmOperand {
}
void addRegRegOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
+ assert(N == 2 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(RegReg.Reg1));
Inst.addOperand(MCOperand::createReg(RegReg.Reg2));
}
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index bfa3bf3cc74e2b..3ba3000145784a 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -1417,6 +1417,67 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
case ISD::LOAD: {
if (tryIndexedLoad(Node))
return;
+
+ if (Subtarget->hasVendorXCVmem()) {
+ // We have to match the post-incrementing load here
+ LoadSDNode *Load = cast<LoadSDNode>(Node);
+ if (Load->getAddressingMode() != ISD::POST_INC)
+ break;
+
+ SDValue Chain = Node->getOperand(0);
+ SDValue Base = Node->getOperand(1);
+ SDValue Offset = Node->getOperand(2);
+
+ bool simm12 = false;
+ bool signExtend = Load->getExtensionType() == ISD::SEXTLOAD;
+
+ if (auto ConstantOffset = dyn_cast<ConstantSDNode>(Offset)) {
+ int ConstantVal = ConstantOffset->getSExtValue();
+ simm12 = isInt<12>(ConstantVal);
+ if (simm12)
+ Offset = CurDAG->getTargetConstant(ConstantVal, SDLoc(Offset),
+ Offset.getValueType());
+ }
+
+ unsigned Opcode = 0;
+ switch (Load->getMemoryVT().getSimpleVT().SimpleTy) {
+ case MVT::i8:
+ if (simm12 && signExtend)
+ Opcode = RISCV::CV_LB_ri_inc;
+ else if (simm12 && !signExtend)
+ Opcode = RISCV::CV_LBU_ri_inc;
+ else if (!simm12 && signExtend)
+ Opcode = RISCV::CV_LB_rr_inc;
+ else
+ Opcode = RISCV::CV_LBU_rr_inc;
+ break;
+ case MVT::i16:
+ if (simm12 && signExtend)
+ Opcode = RISCV::CV_LH_ri_inc;
+ else if (simm12 && !signExtend)
+ Opcode = RISCV::CV_LHU_ri_inc;
+ else if (!simm12 && signExtend)
+ Opcode = RISCV::CV_LH_rr_inc;
+ else
+ Opcode = RISCV::CV_LHU_rr_inc;
+ break;
+ case MVT::i32:
+ if (simm12)
+ Opcode = RISCV::CV_LW_ri_inc;
+ else
+ Opcode = RISCV::CV_LW_rr_inc;
+ break;
+ default:
+ break;
+ }
+ if (!Opcode)
+ break;
+
+ ReplaceNode(Node, CurDAG->getMachineNode(Opcode, DL, XLenVT, XLenVT,
+ Chain.getSimpleValueType(), Base,
+ Offset, Chain));
+ return;
+ }
break;
}
case ISD::INTRINSIC_WO_CHAIN: {
@@ -2544,6 +2605,19 @@ bool RISCVDAGToDAGISel::SelectAddrRegImmLsb00000(SDValue Addr, SDValue &Base,
return true;
}
+bool RISCVDAGToDAGISel::SelectAddrRegReg(SDValue Addr, SDValue &Base,
+ SDValue &Offset) {
+ if (Addr.getOpcode() != ISD::ADD)
+ return false;
+
+ if (isa<ConstantSDNode>(Addr.getOperand(1)))
+ return false;
+
+ Base = Addr.getOperand(1);
+ Offset = Addr.getOperand(0);
+ return true;
+}
+
bool RISCVDAGToDAGISel::selectShiftMask(SDValue N, unsigned ShiftWidth,
SDValue &ShAmt) {
ShAmt = N;
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
index 77e174135a599f..c813acf7325fec 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -80,6 +80,8 @@ class RISCVDAGToDAGISel : public SelectionDAGISel {
return false;
}
+ bool SelectAddrRegReg(SDValue Addr, SDValue &Base, SDValue &Offset);
+
bool tryShrinkShlLogicImm(SDNode *Node);
bool trySignedBitfieldExtract(SDNode *Node);
bool tryIndexedLoad(SDNode *Node);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index c8a94adcd91c6a..bdee4725243045 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1366,6 +1366,16 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
}
}
+ if (Subtarget.hasVendorXCVmem()) {
+ setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
+ setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);
+ setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
+
+ setIndexedStoreAction(ISD::POST_INC, MVT::i8, Legal);
+ setIndexedStoreAction(ISD::POST_INC, MVT::i16, Legal);
+ setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
+ }
+
// Function alignments.
const Align FunctionAlignment(Subtarget.hasStdExtCOrZca() ? 2 : 4);
setMinFunctionAlignment(FunctionAlignment);
@@ -19324,6 +19334,26 @@ bool RISCVTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
SDValue &Offset,
ISD::MemIndexedMode &AM,
SelectionDAG &DAG) const {
+ if (Subtarget.hasVendorXCVmem()) {
+ if (Op->getOpcode() != ISD::ADD)
+ return false;
+
+ if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
+ Base = LS->getBasePtr();
+ } else {
+ return false;
+ }
+
+ if (Base == Op->getOperand(0)) {
+ Offset = Op->getOperand(1);
+ } else if (Base == Op->getOperand(1)) {
+ Offset = Op->getOperand(0);
+ } else {
+ return false;
+ }
+ AM = ISD::POST_INC;
+ return true;
+ }
EVT VT;
SDValue Ptr;
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td
index 924e91e15c348f..228efe6e28567a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td
@@ -512,11 +512,13 @@ def CVrrAsmOperand : AsmOperandClass {
let DiagnosticType = "InvalidRegReg";
}
-def CVrr : Operand<OtherVT> {
+def CVrr : Operand<i32>,
+ ComplexPattern<i32, 2, "SelectAddrRegReg",[]> {
let ParserMatchClass = CVrrAsmOperand;
let EncoderMethod = "getRegReg";
let DecoderMethod = "decodeRegReg";
let PrintMethod = "printRegReg";
+ let MIOperandInfo = (ops GPR:$base, GPR:$offset);
}
class CVLoad_ri_inc<bits<3> funct3, string opcodestr>
@@ -659,6 +661,47 @@ let Predicates = [HasVendorXCVelw, IsRV32], hasSideEffects = 0,
def CV_ELW : CVLoad_ri<0b011, "cv.elw">;
}
+//===----------------------------------------------------------------------===//
+// Patterns for load & store operations
+//===----------------------------------------------------------------------===//
+class CVLdrrPat<PatFrag LoadOp, RVInst Inst>
+ : Pat<(XLenVT (LoadOp CVrr:$regreg)),
+ (Inst CVrr:$regreg)>;
+
+class CVStriPat<PatFrag StoreOp, RVInst Inst>
+ : Pat<(StoreOp (XLenVT GPR:$rs2), GPR:$rs1, simm12:$imm12),
+ (Inst GPR:$rs2, GPR:$rs1, simm12:$imm12)>;
+
+class CVStrriPat<PatFrag StoreOp, RVInst Inst>
+ : Pat<(StoreOp (XLenVT GPR:$rs2), GPR:$rs1, GPR:$rs3),
+ (Inst GPR:$rs2, GPR:$rs1, GPR:$rs3)>;
+
+class CVStrrPat<PatFrag StoreOp, RVInst Inst>
+ : Pat<(StoreOp (XLenVT GPR:$rs2), CVrr:$regreg),
+ (Inst GPR:$rs2, CVrr:$regreg)>;
+
+let Predicates = [HasVendorXCVmem, IsRV32], AddedComplexity = 1 in {
+ def : CVLdrrPat<sextloadi8, CV_LB_rr>;
+ def : CVLdrrPat<zextloadi8, CV_LBU_rr>;
+ def : CVLdrrPat<extloadi8, CV_LBU_rr>;
+ def : CVLdrrPat<sextloadi16, CV_LH_rr>;
+ def : CVLdrrPat<zextloadi16, CV_LHU_rr>;
+ def : CVLdrrPat<extloadi16, CV_LHU_rr>;
+ def : CVLdrrPat<load, CV_LW_rr>;
+
+ def : CVStriPat<post_truncsti8, CV_SB_ri_inc>;
+ def : CVStriPat<post_truncsti16, CV_SH_ri_inc>;
+ def : CVStriPat<post_store, CV_SW_ri_inc>;
+
+ def : CVStrriPat<post_truncsti8, CV_SB_rr_inc>;
+ def : CVStrriPat<post_truncsti16, CV_SH_rr_inc>;
+ def : CVStrriPat<post_store, CV_SW_rr_inc>;
+
+ def : CVStrrPat<truncstorei8, CV_SB_rr>;
+ def : CVStrrPat<truncstorei16, CV_SH_rr>;
+ def : CVStrrPat<store, CV_SW_rr>;
+}
+
def cv_tuimm2 : TImmLeaf<XLenVT, [{return isUInt<2>(Imm);}]>;
def cv_tuimm5 : TImmLeaf<XLenVT, [{return isUInt<5>(Imm);}]>;
def cv_uimm10 : ImmLeaf<XLenVT, [{return isUInt<10>(Imm);}]>;
diff --git a/llvm/test/CodeGen/RISCV/xcvmem.ll b/llvm/test/CodeGen/RISCV/xcvmem.ll
new file mode 100644
index 00000000000000..037e49b9b0df7d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/xcvmem.ll
@@ -0,0 +1,295 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O3 -mtriple=riscv32 -mattr=+xcvmem -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK
+
+define <2 x i32> @lb_ri_inc(i8* %a) {
+; CHECK-LABEL: lb_ri_inc:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.lb a1, (a0), 42
+; CHECK-NEXT: ret
+ %1 = load i8, i8* %a
+ %2 = sext i8 %1 to i32
+ %3 = getelementptr i8, i8* %a, i32 42
+ %4 = ptrtoint i8* %3 to i32
+ %5 = insertelement <2 x i32> undef, i32 %4, i32 0
+ %6 = insertelement <2 x i32> %5, i32 %2, i32 1
+ ret <2 x i32> %6
+}
+
+define <2 x i32> @lb_rr_inc(i8* %a, i32 %b) {
+; CHECK-LABEL: lb_rr_inc:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.lb a1, (a0), a1
+; CHECK-NEXT: ret
+ %1 = load i8, i8* %a
+ %2 = sext i8 %1 to i32
+ %3 = getelementptr i8, i8* %a, i32 %b
+ %4 = ptrtoint i8* %3 to i32
+ %5 = insertelement <2 x i32> undef, i32 %4, i32 0
+ %6 = insertelement <2 x i32> %5, i32 %2, i32 1
+ ret <2 x i32> %6
+}
+
+define i32 @lb_rr(i8* %a, i32 %b) {
+; CHECK-LABEL: lb_rr:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.lb a0, a1(a0)
+; CHECK-NEXT: ret
+ %1 = getelementptr i8, i8* %a, i32 %b
+ %2 = load i8, i8* %1
+ %3 = sext i8 %2 to i32
+ ret i32 %3
+}
+
+define <2 x i32> @lbu_ri_inc(i8* %a) {
+; CHECK-LABEL: lbu_ri_inc:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.lbu a1, (a0), 42
+; CHECK-NEXT: ret
+ %1 = load i8, i8* %a
+ %2 = zext i8 %1 to i32
+ %3 = getelementptr i8, i8* %a, i32 42
+ %4 = ptrtoint i8* %3 to i32
+ %5 = insertelement <2 x i32> undef, i32 %4, i32 0
+ %6 = insertelement <2 x i32> %5, i32 %2, i32 1
+ ret <2 x i32> %6
+}
+
+define <2 x i32> @lbu_rr_inc(i8* %a, i32 %b) {
+; CHECK-LABEL: lbu_rr_inc:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.lbu a1, (a0), a1
+; CHECK-NEXT: ret
+ %1 = load i8, i8* %a
+ %2 = zext i8 %1 to i32
+ %3 = getelementptr i8, i8* %a, i32 %b
+ %4 = ptrtoint i8* %3 to i32
+ %5 = insertelement <2 x i32> undef, i32 %4, i32 0
+ %6 = insertelement <2 x i32> %5, i32 %2, i32 1
+ ret <2 x i32> %6
+}
+
+define i32 @lbu_rr(i8* %a, i32 %b) {
+; CHECK-LABEL: lbu_rr:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.lbu a0, a1(a0)
+; CHECK-NEXT: ret
+ %1 = getelementptr i8, i8* %a, i32 %b
+ %2 = load i8, i8* %1
+ %3 = zext i8 %2 to i32
+ ret i32 %3
+}
+
+define <2 x i32> @lh_ri_inc(i16* %a) {
+; CHECK-LABEL: lh_ri_inc:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.lh a1, (a0), 84
+; CHECK-NEXT: ret
+ %1 = load i16, i16* %a
+ %2 = sext i16 %1 to i32
+ %3 = getelementptr i16, i16* %a, i32 42
+ %4 = ptrtoint i16* %3 to i32
+ %5 = insertelement <2 x i32> undef, i32 %4, i32 0
+ %6 = insertelement <2 x i32> %5, i32 %2, i32 1
+ ret <2 x i32> %6
+}
+
+define <2 x i32> @lh_rr_inc(i16* %a, i32 %b) {
+; CHECK-LABEL: lh_rr_inc:
+; CHECK: # %bb.0:
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: cv.lh a1, (a0), a1
+; CHECK-NEXT: ret
+ %1 = load i16, i16* %a
+ %2 = sext i16 %1 to i32
+ %3 = getelementptr i16, i16* %a, i32 %b
+ %4 = ptrtoint i16* %3 to i32
+ %5 = insertelement <2 x i32> undef, i32 %4, i32 0
+ %6 = insertelement <2 x i32> %5, i32 %2, i32 1
+ ret <2 x i32> %6
+}
+
+define i32 @lh_rr(i16* %a, i32 %b) {
+; CHECK-LABEL: lh_rr:
+; CHECK: # %bb.0:
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: cv.lh a0, a1(a0)
+; CHECK-NEXT: ret
+ %1 = getelementptr i16, i16* %a, i32 %b
+ %2 = load i16, i16* %1
+ %3 = sext i16 %2 to i32
+ ret i32 %3
+}
+
+define <2 x i32> @lhu_ri_inc(i16* %a) {
+; CHECK-LABEL: lhu_ri_inc:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.lhu a1, (a0), 84
+; CHECK-NEXT: ret
+ %1 = load i16, i16* %a
+ %2 = zext i16 %1 to i32
+ %3 = getelementptr i16, i16* %a, i32 42
+ %4 = ptrtoint i16* %3 to i32
+ %5 = insertelement <2 x i32> undef, i32 %4, i32 0
+ %6 = insertelement <2 x i32> %5, i32 %2, i32 1
+ ret <2 x i32> %6
+}
+
+define <2 x i32> @lhu_rr_inc(i16* %a, i32 %b) {
+; CHECK-LABEL: lhu_rr_inc:
+; CHECK: # %bb.0:
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: cv.lhu a1, (a0), a1
+; CHECK-NEXT: ret
+ %1 = load i16, i16* %a
+ %2 = zext i16 %1 to i32
+ %3 = getelementptr i16, i16* %a, i32 %b
+ %4 = ptrtoint i16* %3 to i32
+ %5 = insertelement <2 x i32> undef, i32 %4, i32 0
+ %6 = insertelement <2 x i32> %5, i32 %2, i32 1
+ ret <2 x i32> %6
+}
+
+define i32 @lhu_rr(i16* %a, i32 %b) {
+; CHECK-LABEL: lhu_rr:
+; CHECK: # %bb.0:
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: cv.lhu a0, a1(a0)
+; CHECK-NEXT: ret
+ %1 = getelementptr i16, i16* %a, i32 %b
+ %2 = load i16, i16* %1
+ %3 = zext i16 %2 to i32
+ ret i32 %3
+}
+
+define <2 x i32> @lw_ri_inc(i32* %a) {
+; CHECK-LABEL: lw_ri_inc:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.lw a1, (a0), 168
+; CHECK-NEXT: ret
+ %1 = load i32, i32* %a
+ %2 = getelementptr i32, i32* %a, i32 42
+ %3 = ptrtoint i32* %2 to i32
+ %4 = insertelement <2 x i32> undef, i32 %3, i32 0
+ %5 = insertelement <2 x i32> %4, i32 %1, i32 1
+ ret <2 x i32> %5
+}
+
+define <2 x i32> @lw_rr_inc(i32* %a, i32 %b) {
+; CHECK-LABEL: lw_rr_inc:
+; CHECK: # %bb.0:
+; CHECK-NEXT: slli a1, a1, 2
+; CHECK-NEXT: cv.lw a1, (a0), a1
+; CHECK-NEXT: ret
+ %1 = load i32, i32* %a
+ %2 = getelementptr i32, i32* %a, i32 %b
+ %3 = ptrtoint i32* %2 to i32
+ %4 = insertelement <2 x i32> undef, i32 %3, i32 0
+ %5 = insertelement <2 x i32> %4, i32 %1, i32 1
+ ret <2 x i32> %5
+}
+
+define i32 @lw_rr(i32* %a, i32 %b) {
+; CHECK-LABEL: lw_rr:
+; CHECK: # %bb.0:
+; CHECK-NEXT: slli a1, a1, 2
+; CHECK-NEXT: cv.lw a0, a1(a0)
+; CHECK-NEXT: ret
+ %1 = getelementptr i32, i32* %a, i32 %b
+ %2 = load i32, i32* %1
+ ret i32 %2
+}
+
+define i8* @sb_ri_inc(i8* %a, i8 %b) {
+; CHECK-LABEL: sb_ri_inc:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.sb a1, (a0), 42
+; CHECK-NEXT: ret
+ store i8 %b, i8* %a
+ %1 = getelementptr i8, i8* %a, i32 42
+ ret i8* %1
+}
+
+define i8* @sb_rr_inc(i8* %a, i8 %b, i32 %c) {
+; CHECK-LABEL: sb_rr_inc:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.sb a1, (a0), a2
+; CHECK-NEXT: ret
+ store i8 %b, i8* %a
+ %1 = getelementptr i8, i8* %a, i32 %c
+ ret i8* %1
+}
+
+define void @sb_rr(i8* %a, i8 %b, i32 %c) {
+; CHECK-LABEL: sb_rr:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.sb a1, a2(a0)
+; CHECK-NEXT: ret
+ %1 = getelementptr i8, i8* %a, i32 %c
+ store i8 %b, i8* %1
+ ret void
+}
+
+define i16* @sh_ri_inc(i16* %a, i16 %b) {
+; CHECK-LABEL: sh_ri_inc:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.sh a1, (a0), 84
+; CHECK-NEXT: ret
+ store i16 %b, i16* %a
+ %1 = getelementptr i16, i16* %a, i32 42
+ ret i16* %1
+}
+
+define i16* @sh_rr_inc(i16* %a, i16 %b, i32 %c) {
+; CHECK-LABEL: sh_rr_inc:
+; CHECK: # %bb.0:
+; CHECK-NEXT: slli a2, a2, 1
+; CHECK-NEXT: cv.sh a1, (a0), a2
+; CHECK-NEXT: ret
+ store i16 %b, i16* %a
+ %1 = getelementptr i16, i16* %a, i32 %c
+ ret i16* %1
+}
+
+define void @sh_rr(i16* %a, i16 %b, i32 %c) {
+; CHECK-LABEL: sh_rr:
+; CHECK: # %bb.0:
+; CHECK-NEXT: slli a2, a2, 1
+; CHECK-NEXT: cv.sh a1, a2(a0)
+; CHECK-NEXT: ret
+ %1 = getelementptr i16, i16* %a, i32 %c
+ store i16 %b, i16* %1
+ ret void
+}
+
+define i32* @sw_ri_inc(i32* %a, i32 %b) {
+; CHECK-LABEL: sw_ri_inc:
+; CHECK: # %bb.0:
+; CHECK-NEXT: cv.sw a1, (a0), 168
+; CHECK-NEXT: ret
+ store i32 %b, i32* %a
+ %1 = getelementptr i32, i32* %a, i32 42
+ ret i32* %1
+}
+
+define i32* @sw_rr_inc(i32* %a, i32 %b, i32 %c) {
+; CHECK-LABEL: sw_rr_inc:
+; CHECK: # %bb.0:
+; CHECK-NEXT: slli a2, a2, 2
+; CHECK-NEXT: cv.sw a1, (a0), a2
+; CHECK-NEXT: ret
+ store i32 %b, i32* %a
+ %1 = getelementptr i32, i32* %a, i32 %c
+ ret i32* %1
+}
+
+define void @sw_rr(i32* %a, i32 %b, i32 %c) {
+; CHECK-LABEL: sw_rr:
+; CHECK: # %bb.0:
+; CHECK-NEXT: slli a2, a2, 2
+; CHECK-NEXT: cv.sw a1, a2(a0)
+; CHECK-NEXT: ret
+ %1 = getelementptr i32, i32* %a, i32 %c
+ store i32 %b, i32* %1
+ ret void
+}
More information about the llvm-commits
mailing list