[llvm] r318737 - [RISCV] Support and tests for a variety of additional LLVM IR constructs

Alex Bradbury via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 21 00:11:03 PST 2017


Author: asb
Date: Tue Nov 21 00:11:03 2017
New Revision: 318737

URL: http://llvm.org/viewvc/llvm-project?rev=318737&view=rev
Log:
[RISCV] Support and tests for a variety of additional LLVM IR constructs

Previous patches primarily ensured that codegen was possible for the standard
RISC-V instructions. However, a number of IR inputs would not be lowered
appropriately. This patch adds both lowering support and test cases for many
of those constructs:
* Improved sext/zext/trunc support
* Support for setcc variants that don't map directly to RISC-V instructions
* Lowering mul (as a libcall), and hence support for external symbols
* addc, adde, subc, sube
* mulhs, srem, mulhu, urem, udiv, sdiv
* {srl,sra,shl}_parts
* brind
* br_jt
* bswap, ctlz, cttz, ctpop
* rotl, rotr
* BlockAddress operands

Differential Revision: https://reviews.llvm.org/D29938


Added:
    llvm/trunk/test/CodeGen/RISCV/addc-adde-sube-subc.ll
    llvm/trunk/test/CodeGen/RISCV/blockaddress.ll
    llvm/trunk/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
    llvm/trunk/test/CodeGen/RISCV/div.ll
    llvm/trunk/test/CodeGen/RISCV/i32-icmp.ll
    llvm/trunk/test/CodeGen/RISCV/indirectbr.ll
    llvm/trunk/test/CodeGen/RISCV/jumptable.ll
    llvm/trunk/test/CodeGen/RISCV/mul.ll
    llvm/trunk/test/CodeGen/RISCV/rem.ll
    llvm/trunk/test/CodeGen/RISCV/rotl-rotr.ll
    llvm/trunk/test/CodeGen/RISCV/sext-zext-trunc.ll
    llvm/trunk/test/CodeGen/RISCV/shifts.ll
Modified:
    llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/trunk/lib/Target/RISCV/RISCVISelLowering.h
    llvm/trunk/lib/Target/RISCV/RISCVInstrInfo.td
    llvm/trunk/lib/Target/RISCV/RISCVMCInstLower.cpp
    llvm/trunk/test/CodeGen/RISCV/alu32.ll

Modified: llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp?rev=318737&r1=318736&r2=318737&view=diff
==============================================================================
--- llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp Tue Nov 21 00:11:03 2017
@@ -53,17 +53,54 @@ RISCVTargetLowering::RISCVTargetLowering
     setLoadExtAction(N, XLenVT, MVT::i1, Promote);
 
   // TODO: add all necessary setOperationAction calls.
-  setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
-
+  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
   setOperationAction(ISD::BR_CC, XLenVT, Expand);
   setOperationAction(ISD::SELECT, XLenVT, Custom);
   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
 
+  for (auto VT : {MVT::i1, MVT::i8, MVT::i16})
+    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
+
+  setOperationAction(ISD::ADDC, XLenVT, Expand);
+  setOperationAction(ISD::ADDE, XLenVT, Expand);
+  setOperationAction(ISD::SUBC, XLenVT, Expand);
+  setOperationAction(ISD::SUBE, XLenVT, Expand);
+
+  setOperationAction(ISD::SREM, XLenVT, Expand);
+  setOperationAction(ISD::SDIVREM, XLenVT, Expand);
+  setOperationAction(ISD::SDIV, XLenVT, Expand);
+  setOperationAction(ISD::UREM, XLenVT, Expand);
+  setOperationAction(ISD::UDIVREM, XLenVT, Expand);
+  setOperationAction(ISD::UDIV, XLenVT, Expand);
+
+  setOperationAction(ISD::MUL, XLenVT, Expand);
+  setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
+  setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
+  setOperationAction(ISD::MULHS, XLenVT, Expand);
+  setOperationAction(ISD::MULHU, XLenVT, Expand);
+
+  setOperationAction(ISD::SHL_PARTS, XLenVT, Expand);
+  setOperationAction(ISD::SRL_PARTS, XLenVT, Expand);
+  setOperationAction(ISD::SRA_PARTS, XLenVT, Expand);
+
+  setOperationAction(ISD::ROTL, XLenVT, Expand);
+  setOperationAction(ISD::ROTR, XLenVT, Expand);
+  setOperationAction(ISD::BSWAP, XLenVT, Expand);
+  setOperationAction(ISD::CTTZ, XLenVT, Expand);
+  setOperationAction(ISD::CTLZ, XLenVT, Expand);
+  setOperationAction(ISD::CTPOP, XLenVT, Expand);
+
+  setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
+  setOperationAction(ISD::BlockAddress, XLenVT, Custom);
+
   setBooleanContents(ZeroOrOneBooleanContent);
 
   // Function alignments (log2).
   setMinFunctionAlignment(3);
   setPrefFunctionAlignment(3);
+
+  // Effectively disable jump table generation.
+  setMinimumJumpTableEntries(INT_MAX);
 }
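
Most of the new setOperationAction calls use Expand, which asks the generic
legalizer to rewrite a node in terms of operations RV32I does support, while
Custom routes a node to LowerOperation below; setMinimumJumpTableEntries(INT_MAX)
additionally steers switches away from BR_JT and into compare-and-branch
trees. As a rough sketch (not the actual legalizer code, and ignoring
shift-amount masking), marking ISD::ROTL as Expand yields a DAG along these
lines:

    // Sketch only: roughly what the generic legalizer builds for an i32 rotl
    // once ISD::ROTL is marked Expand; the real code lives in generic
    // SelectionDAG legalization, not in this patch.
    SDValue expandRotl(SelectionDAG &DAG, const SDLoc &DL, SDValue X, SDValue N) {
      EVT VT = X.getValueType();
      SDValue Width = DAG.getConstant(VT.getSizeInBits(), DL, VT);
      SDValue Inv = DAG.getNode(ISD::SUB, DL, VT, Width, N); // 32 - n
      SDValue Hi = DAG.getNode(ISD::SHL, DL, VT, X, N);      // x << n
      SDValue Lo = DAG.getNode(ISD::SRL, DL, VT, X, Inv);    // x >> (32 - n)
      return DAG.getNode(ISD::OR, DL, VT, Hi, Lo);
    }

This matches the sub/sll/srl/or sequences checked in rotl-rotr.ll below.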
 
 // Changes the condition code and swaps operands if necessary, so the SetCC
@@ -112,6 +149,8 @@ SDValue RISCVTargetLowering::LowerOperat
     report_fatal_error("unimplemented operand");
   case ISD::GlobalAddress:
     return lowerGlobalAddress(Op, DAG);
+  case ISD::BlockAddress:
+    return lowerBlockAddress(Op, DAG);
   case ISD::SELECT:
     return lowerSELECT(Op, DAG);
   }
@@ -125,18 +164,56 @@ SDValue RISCVTargetLowering::lowerGlobal
   const GlobalValue *GV = N->getGlobal();
   int64_t Offset = N->getOffset();
 
-  if (!isPositionIndependent() && !Subtarget.is64Bit()) {
-    SDValue GAHi =
-        DAG.getTargetGlobalAddress(GV, DL, Ty, Offset, RISCVII::MO_HI);
-    SDValue GALo =
-        DAG.getTargetGlobalAddress(GV, DL, Ty, Offset, RISCVII::MO_LO);
-    SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, GAHi), 0);
-    SDValue MNLo =
-        SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, GALo), 0);
-    return MNLo;
-  } else {
+  if (isPositionIndependent() || Subtarget.is64Bit())
     report_fatal_error("Unable to lowerGlobalAddress");
-  }
+
+  SDValue GAHi =
+    DAG.getTargetGlobalAddress(GV, DL, Ty, Offset, RISCVII::MO_HI);
+  SDValue GALo =
+    DAG.getTargetGlobalAddress(GV, DL, Ty, Offset, RISCVII::MO_LO);
+  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, GAHi), 0);
+  SDValue MNLo =
+    SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, GALo), 0);
+  return MNLo;
+}
+
+SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
+                                               SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  EVT Ty = Op.getValueType();
+  BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
+  const BlockAddress *BA = N->getBlockAddress();
+  int64_t Offset = N->getOffset();
+
+  if (isPositionIndependent() || Subtarget.is64Bit())
+    report_fatal_error("Unable to lowerBlockAddress");
+
+  SDValue BAHi = DAG.getTargetBlockAddress(BA, Ty, Offset, RISCVII::MO_HI);
+  SDValue BALo = DAG.getTargetBlockAddress(BA, Ty, Offset, RISCVII::MO_LO);
+  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, BAHi), 0);
+  SDValue MNLo =
+    SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, BALo), 0);
+  return MNLo;
+}
+
+SDValue RISCVTargetLowering::lowerExternalSymbol(SDValue Op,
+                                                 SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  EVT Ty = Op.getValueType();
+  ExternalSymbolSDNode *N = cast<ExternalSymbolSDNode>(Op);
+  const char *Sym = N->getSymbol();
+
+  // TODO: should also handle gp-relative loads.
+
+  if (isPositionIndependent() || Subtarget.is64Bit())
+    report_fatal_error("Unable to lowerExternalSymbol");
+
+  SDValue GAHi = DAG.getTargetExternalSymbol(Sym, Ty, RISCVII::MO_HI);
+  SDValue GALo = DAG.getTargetExternalSymbol(Sym, Ty, RISCVII::MO_LO);
+  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, GAHi), 0);
+  SDValue MNLo =
+    SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, GALo), 0);
+  return MNLo;
 }
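
The three lowerings above share one absolute-address recipe: LUI materializes
the upper 20 bits of the address through a %hi relocation, and ADDI adds the
remaining low 12 bits through %lo. A hypothetical helper making the shared
shape explicit (lowerSymbolHiLo is not a real function in the tree; the patch
repeats the sequence inline in each function):

    // Illustration only: the common LUI+ADDI pattern used by
    // lowerGlobalAddress, lowerBlockAddress and lowerExternalSymbol.
    static SDValue lowerSymbolHiLo(SelectionDAG &DAG, const SDLoc &DL, EVT Ty,
                                   SDValue Hi, SDValue Lo) {
      // lui rd, %hi(sym)      ; upper 20 bits of the address
      SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, Hi), 0);
      // addi rd, rd, %lo(sym) ; remaining low 12 bits
      return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, Lo), 0);
    }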
 
 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
@@ -369,8 +446,7 @@ SDValue RISCVTargetLowering::LowerCall(C
   if (isa<GlobalAddressSDNode>(Callee)) {
     Callee = lowerGlobalAddress(Callee, DAG);
   } else if (isa<ExternalSymbolSDNode>(Callee)) {
-    report_fatal_error(
-        "lowerExternalSymbol, needed for lowerCall, not yet handled");
+    Callee = lowerExternalSymbol(Callee, DAG);
   }
 
   // The first call operand is the chain and the second is the target address.

Modified: llvm/trunk/lib/Target/RISCV/RISCVISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/RISCV/RISCVISelLowering.h?rev=318737&r1=318736&r2=318737&view=diff
==============================================================================
--- llvm/trunk/lib/Target/RISCV/RISCVISelLowering.h (original)
+++ llvm/trunk/lib/Target/RISCV/RISCVISelLowering.h Tue Nov 21 00:11:03 2017
@@ -65,6 +65,8 @@ private:
     return true;
   }
   SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG) const;
 };
 }

Modified: llvm/trunk/lib/Target/RISCV/RISCVInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/RISCV/RISCVInstrInfo.td?rev=318737&r1=318736&r2=318737&view=diff
==============================================================================
--- llvm/trunk/lib/Target/RISCV/RISCVInstrInfo.td (original)
+++ llvm/trunk/lib/Target/RISCV/RISCVInstrInfo.td Tue Nov 21 00:11:03 2017
@@ -328,6 +328,17 @@ def : PatGprSimm12<setlt, SLTI>;
 def : PatGprGpr<setult, SLTU>;
 def : PatGprSimm12<setult, SLTIU>;
 
+// Define pattern expansions for setcc operations that aren't directly
+// handled by a RISC-V instruction.
+def : Pat<(seteq GPR:$rs1, GPR:$rs2), (SLTIU (XOR GPR:$rs1, GPR:$rs2), 1)>;
+def : Pat<(setne GPR:$rs1, GPR:$rs2), (SLTU X0, (XOR GPR:$rs1, GPR:$rs2))>;
+def : Pat<(setugt GPR:$rs1, GPR:$rs2), (SLTU GPR:$rs2, GPR:$rs1)>;
+def : Pat<(setuge GPR:$rs1, GPR:$rs2), (XORI (SLTU GPR:$rs1, GPR:$rs2), 1)>;
+def : Pat<(setule GPR:$rs1, GPR:$rs2), (XORI (SLTU GPR:$rs2, GPR:$rs1), 1)>;
+def : Pat<(setgt GPR:$rs1, GPR:$rs2), (SLT GPR:$rs2, GPR:$rs1)>;
+def : Pat<(setge GPR:$rs1, GPR:$rs2), (XORI (SLT GPR:$rs1, GPR:$rs2), 1)>;
+def : Pat<(setle GPR:$rs1, GPR:$rs2), (XORI (SLT GPR:$rs2, GPR:$rs1), 1)>;
+
 let usesCustomInserter = 1 in
 def Select_GPR_Using_CC_GPR
     : Pseudo<(outs GPR:$dst),
@@ -370,6 +381,15 @@ def PseudoBR : Pseudo<(outs), (ins simm2
                PseudoInstExpansion<(JAL X0, simm21_lsb0:$imm20)>;
 
 let isCall = 1, Defs=[X1] in
+let isBarrier = 1, isBranch = 1, isIndirectBranch = 1, isTerminator = 1 in
+def PseudoBRIND : Pseudo<(outs), (ins GPR:$rs1, simm12:$imm12), []>,
+                  PseudoInstExpansion<(JALR X0, GPR:$rs1, simm12:$imm12)>;
+
+def : Pat<(brind GPR:$rs1), (PseudoBRIND GPR:$rs1, 0)>;
+def : Pat<(brind (add GPR:$rs1, simm12:$imm12)),
+          (PseudoBRIND GPR:$rs1, simm12:$imm12)>;
+
+let isCall = 1, Defs = [X1] in
 def PseudoCALL : Pseudo<(outs), (ins GPR:$rs1), [(Call GPR:$rs1)]>,
                  PseudoInstExpansion<(JALR X1, GPR:$rs1, 0)>;
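
The setcc patterns added above encode standard branch-free identities over
the three native compares. As a C++ sketch of what each selected sequence
computes (illustration only, not code from the patch; every result is 0 or 1,
matching ZeroOrOneBooleanContent):

    #include <cstdint>
    uint32_t set_eq (uint32_t a, uint32_t b) { return (a ^ b) < 1u; } // SLTIU(XOR, 1)
    uint32_t set_ne (uint32_t a, uint32_t b) { return 0u < (a ^ b); } // SLTU(X0, XOR)
    uint32_t set_ugt(uint32_t a, uint32_t b) { return b < a; }        // SLTU, operands swapped
    uint32_t set_uge(uint32_t a, uint32_t b) { return (a < b) ^ 1u; } // XORI(SLTU, 1)
    uint32_t set_sgt( int32_t a,  int32_t b) { return b < a; }        // SLT, operands swapped
    uint32_t set_sge( int32_t a,  int32_t b) { return (a < b) ^ 1u; } // XORI(SLT, 1)

setule and setle mirror setuge and setge with the operands swapped;
i32-icmp.ll below checks the emitted sequences.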
 

Modified: llvm/trunk/lib/Target/RISCV/RISCVMCInstLower.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/RISCV/RISCVMCInstLower.cpp?rev=318737&r1=318736&r2=318737&view=diff
==============================================================================
--- llvm/trunk/lib/Target/RISCV/RISCVMCInstLower.cpp (original)
+++ llvm/trunk/lib/Target/RISCV/RISCVMCInstLower.cpp Tue Nov 21 00:11:03 2017
@@ -81,6 +81,14 @@ bool llvm::LowerRISCVMachineOperandToMCO
   case MachineOperand::MO_GlobalAddress:
     MCOp = lowerSymbolOperand(MO, AP.getSymbol(MO.getGlobal()), AP);
     break;
+  case MachineOperand::MO_BlockAddress:
+    MCOp = lowerSymbolOperand(
+        MO, AP.GetBlockAddressSymbol(MO.getBlockAddress()), AP);
+    break;
+  case MachineOperand::MO_ExternalSymbol:
+    MCOp = lowerSymbolOperand(
+        MO, AP.GetExternalSymbolSymbol(MO.getSymbolName()), AP);
+    break;
   }
   return true;
 }

Added: llvm/trunk/test/CodeGen/RISCV/addc-adde-sube-subc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/addc-adde-sube-subc.ll?rev=318737&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/addc-adde-sube-subc.ll (added)
+++ llvm/trunk/test/CodeGen/RISCV/addc-adde-sube-subc.ll Tue Nov 21 00:11:03 2017
@@ -0,0 +1,30 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32I %s
+
+; Ensure that the ISDOpcodes ADDC, ADDE, SUBC, SUBE are handled correctly
+
+define i64 @addc_adde(i64 %a, i64 %b) {
+; RV32I-LABEL: addc_adde:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    add a1, a1, a3
+; RV32I-NEXT:    add a2, a0, a2
+; RV32I-NEXT:    sltu a0, a2, a0
+; RV32I-NEXT:    add a1, a1, a0
+; RV32I-NEXT:    addi a0, a2, 0
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = add i64 %a, %b
+  ret i64 %1
+}
+
+define i64 @subc_sube(i64 %a, i64 %b) {
+; RV32I-LABEL: subc_sube:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sltu a3, a0, a2
+; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = sub i64 %a, %b
+  ret i64 %1
+}
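
For reference, the expansion exercised here is plain two-word arithmetic,
with sltu computing the carry out of the low word. A C++ sketch of the math
in the checked assembly (illustration only; U64 and add64 are made-up names):

    #include <cstdint>
    struct U64 { uint32_t lo, hi; };
    // Mirrors addc_adde: the carry is 1 exactly when the low-word sum wrapped.
    U64 add64(U64 a, U64 b) {
      U64 r;
      r.lo = a.lo + b.lo;
      uint32_t carry = r.lo < a.lo ? 1u : 0u; // sltu a0, a2, a0
      r.hi = a.hi + b.hi + carry;
      return r;
    }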

Modified: llvm/trunk/test/CodeGen/RISCV/alu32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/alu32.ll?rev=318737&r1=318736&r2=318737&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/alu32.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/alu32.ll Tue Nov 21 00:11:03 2017
@@ -2,6 +2,10 @@
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32I
 
+; These tests are each targeted at a particular RISC-V ALU instruction. Other
+; files in this folder exercise LLVM IR instructions that don't directly match
+; a RISC-V instruction.
+
 ; Register-immediate instructions
 
 define i32 @addi(i32 %a) nounwind {

Added: llvm/trunk/test/CodeGen/RISCV/blockaddress.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/blockaddress.ll?rev=318737&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/blockaddress.ll (added)
+++ llvm/trunk/test/CodeGen/RISCV/blockaddress.ll Tue Nov 21 00:11:03 2017
@@ -0,0 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+
+@addr = global i8* null
+
+define void @test_blockaddress() nounwind {
+; RV32I-LABEL: test_blockaddress:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 0(s0)
+; RV32I-NEXT:    lui a0, %hi(addr)
+; RV32I-NEXT:    addi a0, a0, %lo(addr)
+; RV32I-NEXT:    lui a1, %hi(.Ltmp0)
+; RV32I-NEXT:    addi a1, a1, %lo(.Ltmp0)
+; RV32I-NEXT:    sw a1, 0(a0)
+; RV32I-NEXT:    lw a0, 0(a0)
+; RV32I-NEXT:    jalr zero, a0, 0
+; RV32I-NEXT:  .Ltmp0: # Block address taken
+; RV32I-NEXT:  .LBB0_1: # %block
+; RV32I-NEXT:    lw ra, 0(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  store volatile i8* blockaddress(@test_blockaddress, %block), i8** @addr
+  %val = load volatile i8*, i8** @addr
+  indirectbr i8* %val, [label %block]
+
+block:
+  ret void
+}

Added: llvm/trunk/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll?rev=318737&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll (added)
+++ llvm/trunk/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll Tue Nov 21 00:11:03 2017
@@ -0,0 +1,547 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+
+declare i16 @llvm.bswap.i16(i16)
+declare i32 @llvm.bswap.i32(i32)
+declare i64 @llvm.bswap.i64(i64)
+declare i8 @llvm.cttz.i8(i8, i1)
+declare i16 @llvm.cttz.i16(i16, i1)
+declare i32 @llvm.cttz.i32(i32, i1)
+declare i64 @llvm.cttz.i64(i64, i1)
+declare i32 @llvm.ctlz.i32(i32, i1)
+declare i32 @llvm.ctpop.i32(i32)
+
+define i16 @test_bswap_i16(i16 %a) nounwind {
+; RV32I-LABEL: test_bswap_i16:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    lui a1, 4080
+; RV32I-NEXT:    addi a1, a1, 0
+; RV32I-NEXT:    slli a2, a0, 8
+; RV32I-NEXT:    and a1, a2, a1
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    srli a0, a0, 16
+; RV32I-NEXT:    jalr zero, ra, 0
+  %tmp = call i16 @llvm.bswap.i16(i16 %a)
+  ret i16 %tmp
+}
+
+define i32 @test_bswap_i32(i32 %a) nounwind {
+; RV32I-LABEL: test_bswap_i32:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    addi a1, a1, -256
+; RV32I-NEXT:    srli a2, a0, 8
+; RV32I-NEXT:    and a1, a2, a1
+; RV32I-NEXT:    srli a2, a0, 24
+; RV32I-NEXT:    or a1, a1, a2
+; RV32I-NEXT:    lui a2, 4080
+; RV32I-NEXT:    addi a2, a2, 0
+; RV32I-NEXT:    slli a3, a0, 8
+; RV32I-NEXT:    and a2, a3, a2
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    or a0, a0, a2
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    jalr zero, ra, 0
+  %tmp = call i32 @llvm.bswap.i32(i32 %a)
+  ret i32 %tmp
+}
+
+define i64 @test_bswap_i64(i64 %a) nounwind {
+; RV32I-LABEL: test_bswap_i64:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    lui a2, 16
+; RV32I-NEXT:    addi a3, a2, -256
+; RV32I-NEXT:    srli a2, a1, 8
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    srli a4, a1, 24
+; RV32I-NEXT:    or a2, a2, a4
+; RV32I-NEXT:    lui a4, 4080
+; RV32I-NEXT:    addi a4, a4, 0
+; RV32I-NEXT:    slli a5, a1, 8
+; RV32I-NEXT:    and a5, a5, a4
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    or a1, a1, a5
+; RV32I-NEXT:    or a2, a1, a2
+; RV32I-NEXT:    srli a1, a0, 8
+; RV32I-NEXT:    and a1, a1, a3
+; RV32I-NEXT:    srli a3, a0, 24
+; RV32I-NEXT:    or a1, a1, a3
+; RV32I-NEXT:    slli a3, a0, 8
+; RV32I-NEXT:    and a3, a3, a4
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    or a0, a0, a3
+; RV32I-NEXT:    or a1, a0, a1
+; RV32I-NEXT:    addi a0, a2, 0
+; RV32I-NEXT:    jalr zero, ra, 0
+  %tmp = call i64 @llvm.bswap.i64(i64 %a)
+  ret i64 %tmp
+}
+
+define i8 @test_cttz_i8(i8 %a) nounwind {
+; RV32I-LABEL: test_cttz_i8:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 12(s0)
+; RV32I-NEXT:    addi a1, a0, 0
+; RV32I-NEXT:    addi a0, zero, 8
+; RV32I-NEXT:    andi a2, a1, 255
+; RV32I-NEXT:    addi a3, zero, 0
+; RV32I-NEXT:    beq a2, a3, .LBB3_2
+; RV32I-NEXT:    jal zero, .LBB3_1
+; RV32I-NEXT:  .LBB3_1: # %cond.false
+; RV32I-NEXT:    addi a0, a1, -1
+; RV32I-NEXT:    xori a1, a1, -1
+; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    lui a1, 349525
+; RV32I-NEXT:    addi a1, a1, 1365
+; RV32I-NEXT:    srli a2, a0, 1
+; RV32I-NEXT:    and a1, a2, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    lui a1, 209715
+; RV32I-NEXT:    addi a1, a1, 819
+; RV32I-NEXT:    and a2, a0, a1
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    add a0, a2, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lui a1, 61681
+; RV32I-NEXT:    addi a1, a1, -241
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    lui a1, 4112
+; RV32I-NEXT:    addi a1, a1, 257
+; RV32I-NEXT:    lui a2, %hi(__mulsi3)
+; RV32I-NEXT:    addi a2, a2, %lo(__mulsi3)
+; RV32I-NEXT:    jalr ra, a2, 0
+; RV32I-NEXT:    srli a0, a0, 24
+; RV32I-NEXT:  .LBB3_2: # %cond.end
+; RV32I-NEXT:    lw ra, 12(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %tmp = call i8 @llvm.cttz.i8(i8 %a, i1 false)
+  ret i8 %tmp
+}
+
+define i16 @test_cttz_i16(i16 %a) nounwind {
+; RV32I-LABEL: test_cttz_i16:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 12(s0)
+; RV32I-NEXT:    addi a1, a0, 0
+; RV32I-NEXT:    addi a0, zero, 16
+; RV32I-NEXT:    lui a2, 16
+; RV32I-NEXT:    addi a2, a2, -1
+; RV32I-NEXT:    and a2, a1, a2
+; RV32I-NEXT:    addi a3, zero, 0
+; RV32I-NEXT:    beq a2, a3, .LBB4_2
+; RV32I-NEXT:    jal zero, .LBB4_1
+; RV32I-NEXT:  .LBB4_1: # %cond.false
+; RV32I-NEXT:    addi a0, a1, -1
+; RV32I-NEXT:    xori a1, a1, -1
+; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    lui a1, 349525
+; RV32I-NEXT:    addi a1, a1, 1365
+; RV32I-NEXT:    srli a2, a0, 1
+; RV32I-NEXT:    and a1, a2, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    lui a1, 209715
+; RV32I-NEXT:    addi a1, a1, 819
+; RV32I-NEXT:    and a2, a0, a1
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    add a0, a2, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lui a1, 61681
+; RV32I-NEXT:    addi a1, a1, -241
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    lui a1, 4112
+; RV32I-NEXT:    addi a1, a1, 257
+; RV32I-NEXT:    lui a2, %hi(__mulsi3)
+; RV32I-NEXT:    addi a2, a2, %lo(__mulsi3)
+; RV32I-NEXT:    jalr ra, a2, 0
+; RV32I-NEXT:    srli a0, a0, 24
+; RV32I-NEXT:  .LBB4_2: # %cond.end
+; RV32I-NEXT:    lw ra, 12(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %tmp = call i16 @llvm.cttz.i16(i16 %a, i1 false)
+  ret i16 %tmp
+}
+
+define i32 @test_cttz_i32(i32 %a) nounwind {
+; RV32I-LABEL: test_cttz_i32:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 12(s0)
+; RV32I-NEXT:    addi a1, a0, 0
+; RV32I-NEXT:    addi a0, zero, 32
+; RV32I-NEXT:    addi a2, zero, 0
+; RV32I-NEXT:    beq a1, a2, .LBB5_2
+; RV32I-NEXT:    jal zero, .LBB5_1
+; RV32I-NEXT:  .LBB5_1: # %cond.false
+; RV32I-NEXT:    addi a0, a1, -1
+; RV32I-NEXT:    xori a1, a1, -1
+; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    lui a1, 349525
+; RV32I-NEXT:    addi a1, a1, 1365
+; RV32I-NEXT:    srli a2, a0, 1
+; RV32I-NEXT:    and a1, a2, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    lui a1, 209715
+; RV32I-NEXT:    addi a1, a1, 819
+; RV32I-NEXT:    and a2, a0, a1
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    add a0, a2, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lui a1, 61681
+; RV32I-NEXT:    addi a1, a1, -241
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    lui a1, 4112
+; RV32I-NEXT:    addi a1, a1, 257
+; RV32I-NEXT:    lui a2, %hi(__mulsi3)
+; RV32I-NEXT:    addi a2, a2, %lo(__mulsi3)
+; RV32I-NEXT:    jalr ra, a2, 0
+; RV32I-NEXT:    srli a0, a0, 24
+; RV32I-NEXT:  .LBB5_2: # %cond.end
+; RV32I-NEXT:    lw ra, 12(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 false)
+  ret i32 %tmp
+}
+
+define i32 @test_ctlz_i32(i32 %a) nounwind {
+; RV32I-LABEL: test_ctlz_i32:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 12(s0)
+; RV32I-NEXT:    addi a1, a0, 0
+; RV32I-NEXT:    addi a0, zero, 32
+; RV32I-NEXT:    addi a2, zero, 0
+; RV32I-NEXT:    beq a1, a2, .LBB6_2
+; RV32I-NEXT:    jal zero, .LBB6_1
+; RV32I-NEXT:  .LBB6_1: # %cond.false
+; RV32I-NEXT:    srli a0, a1, 1
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 2
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 8
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 16
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    lui a1, 349525
+; RV32I-NEXT:    addi a1, a1, 1365
+; RV32I-NEXT:    xori a0, a0, -1
+; RV32I-NEXT:    srli a2, a0, 1
+; RV32I-NEXT:    and a1, a2, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    lui a1, 209715
+; RV32I-NEXT:    addi a1, a1, 819
+; RV32I-NEXT:    and a2, a0, a1
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    add a0, a2, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lui a1, 61681
+; RV32I-NEXT:    addi a1, a1, -241
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    lui a1, 4112
+; RV32I-NEXT:    addi a1, a1, 257
+; RV32I-NEXT:    lui a2, %hi(__mulsi3)
+; RV32I-NEXT:    addi a2, a2, %lo(__mulsi3)
+; RV32I-NEXT:    jalr ra, a2, 0
+; RV32I-NEXT:    srli a0, a0, 24
+; RV32I-NEXT:  .LBB6_2: # %cond.end
+; RV32I-NEXT:    lw ra, 12(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %tmp = call i32 @llvm.ctlz.i32(i32 %a, i1 false)
+  ret i32 %tmp
+}
+
+define i64 @test_cttz_i64(i64 %a) nounwind {
+; RV32I-LABEL: test_cttz_i64:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 28(s0)
+; RV32I-NEXT:    sw s1, 24(s0)
+; RV32I-NEXT:    sw s2, 20(s0)
+; RV32I-NEXT:    sw s3, 16(s0)
+; RV32I-NEXT:    sw s4, 12(s0)
+; RV32I-NEXT:    sw s5, 8(s0)
+; RV32I-NEXT:    sw s6, 4(s0)
+; RV32I-NEXT:    sw s7, 0(s0)
+; RV32I-NEXT:    addi s1, a1, 0
+; RV32I-NEXT:    addi s2, a0, 0
+; RV32I-NEXT:    addi a0, s2, -1
+; RV32I-NEXT:    xori a1, s2, -1
+; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    lui a1, 349525
+; RV32I-NEXT:    addi s4, a1, 1365
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    and a1, a1, s4
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    lui a1, 209715
+; RV32I-NEXT:    addi s5, a1, 819
+; RV32I-NEXT:    and a1, a0, s5
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    and a0, a0, s5
+; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lui a1, 4112
+; RV32I-NEXT:    addi s3, a1, 257
+; RV32I-NEXT:    lui a1, %hi(__mulsi3)
+; RV32I-NEXT:    addi s6, a1, %lo(__mulsi3)
+; RV32I-NEXT:    lui a1, 61681
+; RV32I-NEXT:    addi s7, a1, -241
+; RV32I-NEXT:    and a0, a0, s7
+; RV32I-NEXT:    addi a1, s3, 0
+; RV32I-NEXT:    jalr ra, s6, 0
+; RV32I-NEXT:    addi a1, s1, -1
+; RV32I-NEXT:    xori a2, s1, -1
+; RV32I-NEXT:    and a1, a2, a1
+; RV32I-NEXT:    srli a2, a1, 1
+; RV32I-NEXT:    and a2, a2, s4
+; RV32I-NEXT:    sub a1, a1, a2
+; RV32I-NEXT:    and a2, a1, s5
+; RV32I-NEXT:    srli a1, a1, 2
+; RV32I-NEXT:    and a1, a1, s5
+; RV32I-NEXT:    add a1, a2, a1
+; RV32I-NEXT:    srli a2, a1, 4
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    and a1, a1, s7
+; RV32I-NEXT:    srli s1, a0, 24
+; RV32I-NEXT:    addi a0, a1, 0
+; RV32I-NEXT:    addi a1, s3, 0
+; RV32I-NEXT:    jalr ra, s6, 0
+; RV32I-NEXT:    addi a1, zero, 0
+; RV32I-NEXT:    bne s2, a1, .LBB7_2
+; RV32I-NEXT:  # BB#1:
+; RV32I-NEXT:    srli a0, a0, 24
+; RV32I-NEXT:    addi s1, a0, 32
+; RV32I-NEXT:  .LBB7_2:
+; RV32I-NEXT:    addi a0, s1, 0
+; RV32I-NEXT:    lw s7, 0(s0)
+; RV32I-NEXT:    lw s6, 4(s0)
+; RV32I-NEXT:    lw s5, 8(s0)
+; RV32I-NEXT:    lw s4, 12(s0)
+; RV32I-NEXT:    lw s3, 16(s0)
+; RV32I-NEXT:    lw s2, 20(s0)
+; RV32I-NEXT:    lw s1, 24(s0)
+; RV32I-NEXT:    lw ra, 28(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %tmp = call i64 @llvm.cttz.i64(i64 %a, i1 false)
+  ret i64 %tmp
+}
+
+define i8 @test_cttz_i8_zero_undef(i8 %a) nounwind {
+; RV32I-LABEL: test_cttz_i8_zero_undef:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 12(s0)
+; RV32I-NEXT:    addi a1, a0, -1
+; RV32I-NEXT:    xori a0, a0, -1
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    lui a1, 349525
+; RV32I-NEXT:    addi a1, a1, 1365
+; RV32I-NEXT:    srli a2, a0, 1
+; RV32I-NEXT:    and a1, a2, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    lui a1, 209715
+; RV32I-NEXT:    addi a1, a1, 819
+; RV32I-NEXT:    and a2, a0, a1
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    add a0, a2, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lui a1, 61681
+; RV32I-NEXT:    addi a1, a1, -241
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    lui a1, 4112
+; RV32I-NEXT:    addi a1, a1, 257
+; RV32I-NEXT:    lui a2, %hi(__mulsi3)
+; RV32I-NEXT:    addi a2, a2, %lo(__mulsi3)
+; RV32I-NEXT:    jalr ra, a2, 0
+; RV32I-NEXT:    srli a0, a0, 24
+; RV32I-NEXT:    lw ra, 12(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %tmp = call i8 @llvm.cttz.i8(i8 %a, i1 true)
+  ret i8 %tmp
+}
+
+define i16 @test_cttz_i16_zero_undef(i16 %a) nounwind {
+; RV32I-LABEL: test_cttz_i16_zero_undef:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 12(s0)
+; RV32I-NEXT:    addi a1, a0, -1
+; RV32I-NEXT:    xori a0, a0, -1
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    lui a1, 349525
+; RV32I-NEXT:    addi a1, a1, 1365
+; RV32I-NEXT:    srli a2, a0, 1
+; RV32I-NEXT:    and a1, a2, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    lui a1, 209715
+; RV32I-NEXT:    addi a1, a1, 819
+; RV32I-NEXT:    and a2, a0, a1
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    add a0, a2, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lui a1, 61681
+; RV32I-NEXT:    addi a1, a1, -241
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    lui a1, 4112
+; RV32I-NEXT:    addi a1, a1, 257
+; RV32I-NEXT:    lui a2, %hi(__mulsi3)
+; RV32I-NEXT:    addi a2, a2, %lo(__mulsi3)
+; RV32I-NEXT:    jalr ra, a2, 0
+; RV32I-NEXT:    srli a0, a0, 24
+; RV32I-NEXT:    lw ra, 12(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %tmp = call i16 @llvm.cttz.i16(i16 %a, i1 true)
+  ret i16 %tmp
+}
+
+define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind {
+; RV32I-LABEL: test_cttz_i32_zero_undef:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 12(s0)
+; RV32I-NEXT:    addi a1, a0, -1
+; RV32I-NEXT:    xori a0, a0, -1
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    lui a1, 349525
+; RV32I-NEXT:    addi a1, a1, 1365
+; RV32I-NEXT:    srli a2, a0, 1
+; RV32I-NEXT:    and a1, a2, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    lui a1, 209715
+; RV32I-NEXT:    addi a1, a1, 819
+; RV32I-NEXT:    and a2, a0, a1
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    add a0, a2, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lui a1, 61681
+; RV32I-NEXT:    addi a1, a1, -241
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    lui a1, 4112
+; RV32I-NEXT:    addi a1, a1, 257
+; RV32I-NEXT:    lui a2, %hi(__mulsi3)
+; RV32I-NEXT:    addi a2, a2, %lo(__mulsi3)
+; RV32I-NEXT:    jalr ra, a2, 0
+; RV32I-NEXT:    srli a0, a0, 24
+; RV32I-NEXT:    lw ra, 12(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 true)
+  ret i32 %tmp
+}
+
+define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind {
+; RV32I-LABEL: test_cttz_i64_zero_undef:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 28(s0)
+; RV32I-NEXT:    sw s1, 24(s0)
+; RV32I-NEXT:    sw s2, 20(s0)
+; RV32I-NEXT:    sw s3, 16(s0)
+; RV32I-NEXT:    sw s4, 12(s0)
+; RV32I-NEXT:    sw s5, 8(s0)
+; RV32I-NEXT:    sw s6, 4(s0)
+; RV32I-NEXT:    sw s7, 0(s0)
+; RV32I-NEXT:    addi s1, a1, 0
+; RV32I-NEXT:    addi s2, a0, 0
+; RV32I-NEXT:    addi a0, s2, -1
+; RV32I-NEXT:    xori a1, s2, -1
+; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    lui a1, 349525
+; RV32I-NEXT:    addi s4, a1, 1365
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    and a1, a1, s4
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    lui a1, 209715
+; RV32I-NEXT:    addi s5, a1, 819
+; RV32I-NEXT:    and a1, a0, s5
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    and a0, a0, s5
+; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lui a1, 4112
+; RV32I-NEXT:    addi s3, a1, 257
+; RV32I-NEXT:    lui a1, %hi(__mulsi3)
+; RV32I-NEXT:    addi s6, a1, %lo(__mulsi3)
+; RV32I-NEXT:    lui a1, 61681
+; RV32I-NEXT:    addi s7, a1, -241
+; RV32I-NEXT:    and a0, a0, s7
+; RV32I-NEXT:    addi a1, s3, 0
+; RV32I-NEXT:    jalr ra, s6, 0
+; RV32I-NEXT:    addi a1, s1, -1
+; RV32I-NEXT:    xori a2, s1, -1
+; RV32I-NEXT:    and a1, a2, a1
+; RV32I-NEXT:    srli a2, a1, 1
+; RV32I-NEXT:    and a2, a2, s4
+; RV32I-NEXT:    sub a1, a1, a2
+; RV32I-NEXT:    and a2, a1, s5
+; RV32I-NEXT:    srli a1, a1, 2
+; RV32I-NEXT:    and a1, a1, s5
+; RV32I-NEXT:    add a1, a2, a1
+; RV32I-NEXT:    srli a2, a1, 4
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    and a1, a1, s7
+; RV32I-NEXT:    srli s1, a0, 24
+; RV32I-NEXT:    addi a0, a1, 0
+; RV32I-NEXT:    addi a1, s3, 0
+; RV32I-NEXT:    jalr ra, s6, 0
+; RV32I-NEXT:    addi a1, zero, 0
+; RV32I-NEXT:    bne s2, a1, .LBB11_2
+; RV32I-NEXT:  # BB#1:
+; RV32I-NEXT:    srli a0, a0, 24
+; RV32I-NEXT:    addi s1, a0, 32
+; RV32I-NEXT:  .LBB11_2:
+; RV32I-NEXT:    addi a0, s1, 0
+; RV32I-NEXT:    lw s7, 0(s0)
+; RV32I-NEXT:    lw s6, 4(s0)
+; RV32I-NEXT:    lw s5, 8(s0)
+; RV32I-NEXT:    lw s4, 12(s0)
+; RV32I-NEXT:    lw s3, 16(s0)
+; RV32I-NEXT:    lw s2, 20(s0)
+; RV32I-NEXT:    lw s1, 24(s0)
+; RV32I-NEXT:    lw ra, 28(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %tmp = call i64 @llvm.cttz.i64(i64 %a, i1 true)
+  ret i64 %tmp
+}
+
+define i32 @test_ctpop_i32(i32 %a) nounwind {
+; RV32I-LABEL: test_ctpop_i32:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 12(s0)
+; RV32I-NEXT:    lui a1, 349525
+; RV32I-NEXT:    addi a1, a1, 1365
+; RV32I-NEXT:    srli a2, a0, 1
+; RV32I-NEXT:    and a1, a2, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    lui a1, 209715
+; RV32I-NEXT:    addi a1, a1, 819
+; RV32I-NEXT:    and a2, a0, a1
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    add a0, a2, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lui a1, 61681
+; RV32I-NEXT:    addi a1, a1, -241
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    lui a1, 4112
+; RV32I-NEXT:    addi a1, a1, 257
+; RV32I-NEXT:    lui a2, %hi(__mulsi3)
+; RV32I-NEXT:    addi a2, a2, %lo(__mulsi3)
+; RV32I-NEXT:    jalr ra, a2, 0
+; RV32I-NEXT:    srli a0, a0, 24
+; RV32I-NEXT:    lw ra, 12(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = call i32 @llvm.ctpop.i32(i32 %a)
+  ret i32 %1
+}
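
The lui/addi pairs in these checks materialize the classic SWAR bit-counting
masks: lui 349525 / addi 1365 is 0x55555555, lui 209715 / addi 819 is
0x33333333, lui 61681 / addi -241 is 0x0F0F0F0F, and lui 4112 / addi 257 is
0x01010101. The cttz, ctlz and ctpop expansions all funnel into the same
popcount, with the final multiply performed by __mulsi3 since RV32I has no
multiply instruction. A C++ sketch of the algorithm (illustration only):

    #include <cstdint>
    uint32_t popcount32(uint32_t v) {
      v = v - ((v >> 1) & 0x55555555u);                 // 2-bit partial sums
      v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u); // 4-bit partial sums
      v = (v + (v >> 4)) & 0x0F0F0F0Fu;                 // 8-bit partial sums
      return (v * 0x01010101u) >> 24;                   // add the four bytes
    }

cttz first converts the trailing zeros to set bits via (~v) & (v - 1), and
ctlz first smears the leading one bit rightward with the or/srli ladder;
both then count bits with this same routine.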

Added: llvm/trunk/test/CodeGen/RISCV/div.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/div.ll?rev=318737&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/div.ll (added)
+++ llvm/trunk/test/CodeGen/RISCV/div.ll Tue Nov 21 00:11:03 2017
@@ -0,0 +1,134 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+
+define i32 @udiv(i32 %a, i32 %b) {
+; RV32I-LABEL: udiv:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 12(s0)
+; RV32I-NEXT:    lui a2, %hi(__udivsi3)
+; RV32I-NEXT:    addi a2, a2, %lo(__udivsi3)
+; RV32I-NEXT:    jalr ra, a2, 0
+; RV32I-NEXT:    lw ra, 12(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @udiv_constant(i32 %a) {
+; RV32I-LABEL: udiv_constant:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 12(s0)
+; RV32I-NEXT:    lui a1, %hi(__udivsi3)
+; RV32I-NEXT:    addi a2, a1, %lo(__udivsi3)
+; RV32I-NEXT:    addi a1, zero, 5
+; RV32I-NEXT:    jalr ra, a2, 0
+; RV32I-NEXT:    lw ra, 12(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = udiv i32 %a, 5
+  ret i32 %1
+}
+
+define i32 @udiv_pow2(i32 %a) {
+; RV32I-LABEL: udiv_pow2:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    srli a0, a0, 3
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = udiv i32 %a, 8
+  ret i32 %1
+}
+
+define i64 @udiv64(i64 %a, i64 %b) {
+; RV32I-LABEL: udiv64:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 12(s0)
+; RV32I-NEXT:    lui a4, %hi(__udivdi3)
+; RV32I-NEXT:    addi a4, a4, %lo(__udivdi3)
+; RV32I-NEXT:    jalr ra, a4, 0
+; RV32I-NEXT:    lw ra, 12(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = udiv i64 %a, %b
+  ret i64 %1
+}
+
+define i64 @udiv64_constant(i64 %a) {
+; RV32I-LABEL: udiv64_constant:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 12(s0)
+; RV32I-NEXT:    lui a2, %hi(__udivdi3)
+; RV32I-NEXT:    addi a4, a2, %lo(__udivdi3)
+; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    addi a3, zero, 0
+; RV32I-NEXT:    jalr ra, a4, 0
+; RV32I-NEXT:    lw ra, 12(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = udiv i64 %a, 5
+  ret i64 %1
+}
+
+define i32 @sdiv(i32 %a, i32 %b) {
+; RV32I-LABEL: sdiv:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 12(s0)
+; RV32I-NEXT:    lui a2, %hi(__divsi3)
+; RV32I-NEXT:    addi a2, a2, %lo(__divsi3)
+; RV32I-NEXT:    jalr ra, a2, 0
+; RV32I-NEXT:    lw ra, 12(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @sdiv_constant(i32 %a) {
+; RV32I-LABEL: sdiv_constant:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 12(s0)
+; RV32I-NEXT:    lui a1, %hi(__divsi3)
+; RV32I-NEXT:    addi a2, a1, %lo(__divsi3)
+; RV32I-NEXT:    addi a1, zero, 5
+; RV32I-NEXT:    jalr ra, a2, 0
+; RV32I-NEXT:    lw ra, 12(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = sdiv i32 %a, 5
+  ret i32 %1
+}
+
+define i32 @sdiv_pow2(i32 %a) {
+; RV32I-LABEL: sdiv_pow2:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    srli a1, a1, 29
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    srai a0, a0, 3
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = sdiv i32 %a, 8
+  ret i32 %1
+}
+
+define i64 @sdiv64(i64 %a, i64 %b) {
+; RV32I-LABEL: sdiv64:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 12(s0)
+; RV32I-NEXT:    lui a4, %hi(__divdi3)
+; RV32I-NEXT:    addi a4, a4, %lo(__divdi3)
+; RV32I-NEXT:    jalr ra, a4, 0
+; RV32I-NEXT:    lw ra, 12(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = sdiv i64 %a, %b
+  ret i64 %1
+}
+
+define i64 @sdiv64_constant(i64 %a) {
+; RV32I-LABEL: sdiv64_constant:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 12(s0)
+; RV32I-NEXT:    lui a2, %hi(__divdi3)
+; RV32I-NEXT:    addi a4, a2, %lo(__divdi3)
+; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    addi a3, zero, 0
+; RV32I-NEXT:    jalr ra, a4, 0
+; RV32I-NEXT:    lw ra, 12(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = sdiv i64 %a, 5
+  ret i64 %1
+}
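
udiv_pow2 and sdiv_pow2 above avoid the libcall entirely. The signed case
applies the usual round-toward-zero bias before shifting; a C++ sketch
(illustration only; sdiv8 is a made-up name, and the code assumes arithmetic
right shift on signed values, which matches srai):

    #include <cstdint>
    // Matches the srai/srli/add/srai sequence in sdiv_pow2.
    int32_t sdiv8(int32_t a) {
      uint32_t bias = uint32_t(a >> 31) >> 29; // 7 when a is negative, else 0
      return (a + int32_t(bias)) >> 3;         // truncation now rounds toward zero
    }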

Added: llvm/trunk/test/CodeGen/RISCV/i32-icmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/i32-icmp.ll?rev=318737&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/i32-icmp.ll (added)
+++ llvm/trunk/test/CodeGen/RISCV/i32-icmp.ll Tue Nov 21 00:11:03 2017
@@ -0,0 +1,114 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+
+; TODO: check that the generated instructions for the equivalents of seqz,
+; snez, sltz and sgtz map to something simple
+
+define i32 @icmp_eq(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: icmp_eq:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sltiu a0, a0, 1
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = icmp eq i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_ne(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: icmp_ne:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sltu a0, zero, a0
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = icmp ne i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_ugt(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: icmp_ugt:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sltu a0, a1, a0
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = icmp ugt i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_uge(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: icmp_uge:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sltu a0, a0, a1
+; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = icmp uge i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_ult(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: icmp_ult:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sltu a0, a0, a1
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = icmp ult i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_ule(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: icmp_ule:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sltu a0, a1, a0
+; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = icmp ule i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_sgt(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: icmp_sgt:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = icmp sgt i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_sge(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: icmp_sge:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    slt a0, a0, a1
+; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = icmp sge i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_slt(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: icmp_slt:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    slt a0, a0, a1
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = icmp slt i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_sle(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: icmp_sle:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = icmp sle i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+; TODO: check variants with an immediate?

Added: llvm/trunk/test/CodeGen/RISCV/indirectbr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/indirectbr.ll?rev=318737&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/indirectbr.ll (added)
+++ llvm/trunk/test/CodeGen/RISCV/indirectbr.ll Tue Nov 21 00:11:03 2017
@@ -0,0 +1,36 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+
+define i32 @indirectbr(i8* %target) nounwind {
+; RV32I-LABEL: indirectbr:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 0(s0)
+; RV32I-NEXT:    jalr zero, a0, 0
+; RV32I-NEXT:  .LBB0_1: # %ret
+; RV32I-NEXT:    addi a0, zero, 0
+; RV32I-NEXT:    lw ra, 0(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  indirectbr i8* %target, [label %test_label]
+test_label:
+  br label %ret
+ret:
+  ret i32 0
+}
+
+define i32 @indirectbr_with_offset(i8* %a) nounwind {
+; RV32I-LABEL: indirectbr_with_offset:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 0(s0)
+; RV32I-NEXT:    jalr zero, a0, 1380
+; RV32I-NEXT:  .LBB1_1: # %ret
+; RV32I-NEXT:    addi a0, zero, 0
+; RV32I-NEXT:    lw ra, 0(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %target = getelementptr inbounds i8, i8* %a, i32 1380
+  indirectbr i8* %target, [label %test_label]
+test_label:
+  br label %ret
+ret:
+  ret i32 0
+}

Added: llvm/trunk/test/CodeGen/RISCV/jumptable.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/jumptable.ll?rev=318737&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/jumptable.ll (added)
+++ llvm/trunk/test/CodeGen/RISCV/jumptable.ll Tue Nov 21 00:11:03 2017
@@ -0,0 +1,63 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+
+define void @jt(i32 %in, i32* %out) {
+; RV32I-LABEL: jt:
+; RV32I:       # BB#0: # %entry
+; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    blt a2, a0, .LBB0_3
+; RV32I-NEXT:    jal zero, .LBB0_1
+; RV32I-NEXT:  .LBB0_1: # %entry
+; RV32I-NEXT:    addi a3, zero, 1
+; RV32I-NEXT:    beq a0, a3, .LBB0_5
+; RV32I-NEXT:    jal zero, .LBB0_2
+; RV32I-NEXT:  .LBB0_2: # %entry
+; RV32I-NEXT:    beq a0, a2, .LBB0_6
+; RV32I-NEXT:    jal zero, .LBB0_9
+; RV32I-NEXT:  .LBB0_6: # %bb2
+; RV32I-NEXT:    addi a0, zero, 3
+; RV32I-NEXT:    sw a0, 0(a1)
+; RV32I-NEXT:    jal zero, .LBB0_9
+; RV32I-NEXT:  .LBB0_3: # %entry
+; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    beq a0, a3, .LBB0_7
+; RV32I-NEXT:    jal zero, .LBB0_4
+; RV32I-NEXT:  .LBB0_4: # %entry
+; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    beq a0, a2, .LBB0_8
+; RV32I-NEXT:    jal zero, .LBB0_9
+; RV32I-NEXT:  .LBB0_8: # %bb4
+; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    sw a0, 0(a1)
+; RV32I-NEXT:  .LBB0_9: # %exit
+; RV32I-NEXT:    jalr zero, ra, 0
+; RV32I-NEXT:  .LBB0_5: # %bb1
+; RV32I-NEXT:    addi a0, zero, 4
+; RV32I-NEXT:    sw a0, 0(a1)
+; RV32I-NEXT:    jal zero, .LBB0_9
+; RV32I-NEXT:  .LBB0_7: # %bb3
+; RV32I-NEXT:    sw a2, 0(a1)
+; RV32I-NEXT:    jal zero, .LBB0_9
+entry:
+  switch i32 %in, label %exit [
+    i32 1, label %bb1
+    i32 2, label %bb2
+    i32 3, label %bb3
+    i32 4, label %bb4
+  ]
+bb1:
+  store i32 4, i32* %out
+  br label %exit
+bb2:
+  store i32 3, i32* %out
+  br label %exit
+bb3:
+  store i32 2, i32* %out
+  br label %exit
+bb4:
+  store i32 1, i32* %out
+  br label %exit
+exit:
+  ret void
+}
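
With setMinimumJumpTableEntries(INT_MAX) in effect, this switch lowers to a
compare-and-branch tree (split at 2, then equality tests) rather than a table
load. A C++ sketch of the control flow the checks encode (illustration only;
jt_sketch is a made-up name):

    // Same shape as the blt/beq tree above; the default case stores nothing.
    void jt_sketch(int in, int *out) {
      if (in > 2) {
        if (in == 3)      *out = 2;
        else if (in == 4) *out = 1;
      } else {
        if (in == 1)      *out = 4;
        else if (in == 2) *out = 3;
      }
    }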

Added: llvm/trunk/test/CodeGen/RISCV/mul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/mul.ll?rev=318737&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/mul.ll (added)
+++ llvm/trunk/test/CodeGen/RISCV/mul.ll Tue Nov 21 00:11:03 2017
@@ -0,0 +1,81 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+
+define i32 @square(i32 %a) {
+; RV32I-LABEL: square:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 12(s0)
+; RV32I-NEXT:    lui a1, %hi(__mulsi3)
+; RV32I-NEXT:    addi a2, a1, %lo(__mulsi3)
+; RV32I-NEXT:    addi a1, a0, 0
+; RV32I-NEXT:    jalr ra, a2, 0
+; RV32I-NEXT:    lw ra, 12(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = mul i32 %a, %a
+  ret i32 %1
+}
+
+define i32 @mul(i32 %a, i32 %b) {
+; RV32I-LABEL: mul:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 12(s0)
+; RV32I-NEXT:    lui a2, %hi(__mulsi3)
+; RV32I-NEXT:    addi a2, a2, %lo(__mulsi3)
+; RV32I-NEXT:    jalr ra, a2, 0
+; RV32I-NEXT:    lw ra, 12(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @mul_constant(i32 %a) {
+; RV32I-LABEL: mul_constant:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 12(s0)
+; RV32I-NEXT:    lui a1, %hi(__mulsi3)
+; RV32I-NEXT:    addi a2, a1, %lo(__mulsi3)
+; RV32I-NEXT:    addi a1, zero, 5
+; RV32I-NEXT:    jalr ra, a2, 0
+; RV32I-NEXT:    lw ra, 12(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = mul i32 %a, 5
+  ret i32 %1
+}
+
+define i32 @mul_pow2(i32 %a) {
+; RV32I-LABEL: mul_pow2:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    slli a0, a0, 3
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = mul i32 %a, 8
+  ret i32 %1
+}
+
+define i64 @mul64(i64 %a, i64 %b) {
+; RV32I-LABEL: mul64:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 12(s0)
+; RV32I-NEXT:    lui a4, %hi(__muldi3)
+; RV32I-NEXT:    addi a4, a4, %lo(__muldi3)
+; RV32I-NEXT:    jalr ra, a4, 0
+; RV32I-NEXT:    lw ra, 12(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = mul i64 %a, %b
+  ret i64 %1
+}
+
+define i64 @mul64_constant(i64 %a) {
+; RV32I-LABEL: mul64_constant:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 12(s0)
+; RV32I-NEXT:    lui a2, %hi(__muldi3)
+; RV32I-NEXT:    addi a4, a2, %lo(__muldi3)
+; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    addi a3, zero, 0
+; RV32I-NEXT:    jalr ra, a4, 0
+; RV32I-NEXT:    lw ra, 12(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = mul i64 %a, 5
+  ret i64 %1
+}
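
Because ISD::MUL is marked Expand and RV32I has no multiply instruction,
these functions compile to __mulsi3/__muldi3 libcalls; the lui/addi %hi/%lo
pair that materializes each callee address comes from lowerExternalSymbol.
A sketch of the equivalent source (illustration only; the compiler-rt
signature shown is the conventional one):

    extern "C" int __mulsi3(int a, int b);              // provided by compiler-rt
    int square_sketch(int a) { return __mulsi3(a, a); } // what @square becomes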

Added: llvm/trunk/test/CodeGen/RISCV/rem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/rem.ll?rev=318737&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/rem.ll (added)
+++ llvm/trunk/test/CodeGen/RISCV/rem.ll Tue Nov 21 00:11:03 2017
@@ -0,0 +1,29 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+
+define i32 @urem(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: urem:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 12(s0)
+; RV32I-NEXT:    lui a2, %hi(__umodsi3)
+; RV32I-NEXT:    addi a2, a2, %lo(__umodsi3)
+; RV32I-NEXT:    jalr ra, a2, 0
+; RV32I-NEXT:    lw ra, 12(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @srem(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: srem:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 12(s0)
+; RV32I-NEXT:    lui a2, %hi(__modsi3)
+; RV32I-NEXT:    addi a2, a2, %lo(__modsi3)
+; RV32I-NEXT:    jalr ra, a2, 0
+; RV32I-NEXT:    lw ra, 12(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}

Added: llvm/trunk/test/CodeGen/RISCV/rotl-rotr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/rotl-rotr.ll?rev=318737&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/rotl-rotr.ll (added)
+++ llvm/trunk/test/CodeGen/RISCV/rotl-rotr.ll Tue Nov 21 00:11:03 2017
@@ -0,0 +1,38 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+
+; These IR sequences will generate ISD::ROTL and ISD::ROTR nodes, which the
+; RISC-V backend must be able to select.
+
+define i32 @rotl(i32 %x, i32 %y) {
+; RV32I-LABEL: rotl:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    addi a2, zero, 32
+; RV32I-NEXT:    sub a2, a2, a1
+; RV32I-NEXT:    sll a1, a0, a1
+; RV32I-NEXT:    srl a0, a0, a2
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    jalr zero, ra, 0
+  %z = sub i32 32, %y
+  %b = shl i32 %x, %y
+  %c = lshr i32 %x, %z
+  %d = or i32 %b, %c
+  ret i32 %d
+}
+
+define i32 @rotr(i32 %x, i32 %y) {
+; RV32I-LABEL: rotr:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    addi a2, zero, 32
+; RV32I-NEXT:    sub a2, a2, a1
+; RV32I-NEXT:    srl a1, a0, a1
+; RV32I-NEXT:    sll a0, a0, a2
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    jalr zero, ra, 0
+  %z = sub i32 32, %y
+  %b = lshr i32 %x, %y
+  %c = shl i32 %x, %z
+  %d = or i32 %b, %c
+  ret i32 %d
+}

Added: llvm/trunk/test/CodeGen/RISCV/sext-zext-trunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/sext-zext-trunc.ll?rev=318737&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/sext-zext-trunc.ll (added)
+++ llvm/trunk/test/CodeGen/RISCV/sext-zext-trunc.ll Tue Nov 21 00:11:03 2017
@@ -0,0 +1,296 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+
+; FIXME: an unnecessary register is allocated just to store 0. X0 should be
+; used instead.
+
+define i8 @sext_i1_to_i8(i1 %a) {
+; TODO: the addi that stores 0 in t1 is unnecessary
+; RV32I-LABEL: sext_i1_to_i8:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    andi a0, a0, 1
+; RV32I-NEXT:    addi a1, zero, 0
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = sext i1 %a to i8
+  ret i8 %1
+}
+
+define i16 @sext_i1_to_i16(i1 %a) {
+; TODO: the addi that stores 0 in t1 is unnecessary
+; RV32I-LABEL: sext_i1_to_i16:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    andi a0, a0, 1
+; RV32I-NEXT:    addi a1, zero, 0
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = sext i1 %a to i16
+  ret i16 %1
+}
+
+define i32 @sext_i1_to_i32(i1 %a) {
+; TODO: the addi that stores 0 in t1 is unnecessary
+; RV32I-LABEL: sext_i1_to_i32:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    andi a0, a0, 1
+; RV32I-NEXT:    addi a1, zero, 0
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = sext i1 %a to i32
+  ret i32 %1
+}
+
+define i64 @sext_i1_to_i64(i1 %a) {
+; TODO: the addi that stores 0 in t1 is unnecessary
+; RV32I-LABEL: sext_i1_to_i64:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    andi a0, a0, 1
+; RV32I-NEXT:    addi a1, zero, 0
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    addi a1, a0, 0
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = sext i1 %a to i64
+  ret i64 %1
+}
+
+define i16 @sext_i8_to_i16(i8 %a) {
+; RV32I-LABEL: sext_i8_to_i16:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    srai a0, a0, 24
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = sext i8 %a to i16
+  ret i16 %1
+}
+
+define i32 @sext_i8_to_i32(i8 %a) {
+; RV32I-LABEL: sext_i8_to_i32:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    srai a0, a0, 24
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = sext i8 %a to i32
+  ret i32 %1
+}
+
+define i64 @sext_i8_to_i64(i8 %a) {
+; RV32I-LABEL: sext_i8_to_i64:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    slli a1, a0, 24
+; RV32I-NEXT:    srai a0, a1, 24
+; RV32I-NEXT:    srai a1, a1, 31
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = sext i8 %a to i64
+  ret i64 %1
+}
+
+define i32 @sext_i16_to_i32(i16 %a) {
+; RV32I-LABEL: sext_i16_to_i32:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = sext i16 %a to i32
+  ret i32 %1
+}
+
+define i64 @sext_i16_to_i64(i16 %a) {
+; RV32I-LABEL: sext_i16_to_i64:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    slli a1, a0, 16
+; RV32I-NEXT:    srai a0, a1, 16
+; RV32I-NEXT:    srai a1, a1, 31
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = sext i16 %a to i64
+  ret i64 %1
+}
+
+define i64 @sext_i32_to_i64(i32 %a) {
+; RV32I-LABEL: sext_i32_to_i64:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = sext i32 %a to i64
+  ret i64 %1
+}
+
+define i8 @zext_i1_to_i8(i1 %a) {
+; RV32I-LABEL: zext_i1_to_i8:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    andi a0, a0, 1
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = zext i1 %a to i8
+  ret i8 %1
+}
+
+define i16 @zext_i1_to_i16(i1 %a) {
+; RV32I-LABEL: zext_i1_to_i16:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    andi a0, a0, 1
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = zext i1 %a to i16
+  ret i16 %1
+}
+
+define i32 @zext_i1_to_i32(i1 %a) {
+; RV32I-LABEL: zext_i1_to_i32:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    andi a0, a0, 1
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = zext i1 %a to i32
+  ret i32 %1
+}
+
+define i64 @zext_i1_to_i64(i1 %a) {
+; RV32I-LABEL: zext_i1_to_i64:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    andi a0, a0, 1
+; RV32I-NEXT:    addi a1, zero, 0
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = zext i1 %a to i64
+  ret i64 %1
+}
+
+define i16 @zext_i8_to_i16(i8 %a) {
+; RV32I-LABEL: zext_i8_to_i16:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    andi a0, a0, 255
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = zext i8 %a to i16
+  ret i16 %1
+}
+
+define i32 @zext_i8_to_i32(i8 %a) {
+; RV32I-LABEL: zext_i8_to_i32:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    andi a0, a0, 255
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = zext i8 %a to i32
+  ret i32 %1
+}
+
+define i64 @zext_i8_to_i64(i8 %a) {
+; RV32I-LABEL: zext_i8_to_i64:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    andi a0, a0, 255
+; RV32I-NEXT:    addi a1, zero, 0
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = zext i8 %a to i64
+  ret i64 %1
+}
+
+define i32 @zext_i16_to_i32(i16 %a) {
+; RV32I-LABEL: zext_i16_to_i32:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = zext i16 %a to i32
+  ret i32 %1
+}
+
+define i64 @zext_i16_to_i64(i16 %a) {
+; RV32I-LABEL: zext_i16_to_i64:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    addi a1, zero, 0
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = zext i16 %a to i64
+  ret i64 %1
+}
+
+define i64 @zext_i32_to_i64(i32 %a) {
+; RV32I-LABEL: zext_i32_to_i64:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    addi a1, zero, 0
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = zext i32 %a to i64
+  ret i64 %1
+}
+
+; TODO: should the trunc tests explicitly ensure no code is generated before
+; jalr?
+
+define i1 @trunc_i8_to_i1(i8 %a) {
+; RV32I-LABEL: trunc_i8_to_i1:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = trunc i8 %a to i1
+  ret i1 %1
+}
+
+define i1 @trunc_i16_to_i1(i16 %a) {
+; RV32I-LABEL: trunc_i16_to_i1:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = trunc i16 %a to i1
+  ret i1 %1
+}
+
+define i1 @trunc_i32_to_i1(i32 %a) {
+; RV32I-LABEL: trunc_i32_to_i1:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = trunc i32 %a to i1
+  ret i1 %1
+}
+
+define i1 @trunc_i64_to_i1(i64 %a) {
+; RV32I-LABEL: trunc_i64_to_i1:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = trunc i64 %a to i1
+  ret i1 %1
+}
+
+define i8 @trunc_i16_to_i8(i16 %a) {
+; RV32I-LABEL: trunc_i16_to_i8:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = trunc i16 %a to i8
+  ret i8 %1
+}
+
+define i8 @trunc_i32_to_i8(i32 %a) {
+; RV32I-LABEL: trunc_i32_to_i8:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = trunc i32 %a to i8
+  ret i8 %1
+}
+
+define i8 @trunc_i64_to_i8(i64 %a) {
+; RV32I-LABEL: trunc_i64_to_i8:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = trunc i64 %a to i8
+  ret i8 %1
+}
+
+define i16 @trunc_i32_to_i16(i32 %a) {
+; RV32I-LABEL: trunc_i32_to_i16:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = trunc i32 %a to i16
+  ret i16 %1
+}
+
+define i16 @trunc_i64_to_i16(i64 %a) {
+; RV32I-LABEL: trunc_i64_to_i16:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = trunc i64 %a to i16
+  ret i16 %1
+}
+
+define i32 @trunc_i64_to_i32(i64 %a) {
+; RV32I-LABEL: trunc_i64_to_i32:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = trunc i64 %a to i32
+  ret i32 %1
+}
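
Since RV32I has no byte or halfword sign-extension instruction, sext uses a
shift pair and zext uses a mask, exactly as checked above. A C++ sketch
(illustration only; assumes arithmetic right shift on signed values,
matching srai):

    #include <cstdint>
    int32_t  sext8 (uint32_t x) { return int32_t(x << 24) >> 24; } // slli + srai
    int32_t  sext16(uint32_t x) { return int32_t(x << 16) >> 16; } // slli + srai
    uint32_t zext8 (uint32_t x) { return x & 0xFFu; }              // andi
    // 0xFFFF does not fit andi's signed 12-bit immediate, hence lui+addi+and:
    uint32_t zext16(uint32_t x) { return x & 0xFFFFu; }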

Added: llvm/trunk/test/CodeGen/RISCV/shifts.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/shifts.ll?rev=318737&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/shifts.ll (added)
+++ llvm/trunk/test/CodeGen/RISCV/shifts.ll Tue Nov 21 00:11:03 2017
@@ -0,0 +1,45 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+
+; Basic shift support is tested as part of alu32.ll. This file ensures that
+; shifts which may not be supported natively are lowered properly.
+
+define i64 @lshr64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: lshr64:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 12(s0)
+; RV32I-NEXT:    lui a3, %hi(__lshrdi3)
+; RV32I-NEXT:    addi a3, a3, %lo(__lshrdi3)
+; RV32I-NEXT:    jalr ra, a3, 0
+; RV32I-NEXT:    lw ra, 12(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = lshr i64 %a, %b
+  ret i64 %1
+}
+
+define i64 @ashr64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: ashr64:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 12(s0)
+; RV32I-NEXT:    lui a3, %hi(__ashrdi3)
+; RV32I-NEXT:    addi a3, a3, %lo(__ashrdi3)
+; RV32I-NEXT:    jalr ra, a3, 0
+; RV32I-NEXT:    lw ra, 12(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = ashr i64 %a, %b
+  ret i64 %1
+}
+
+define i64 @shl64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: shl64:
+; RV32I:       # BB#0:
+; RV32I-NEXT:    sw ra, 12(s0)
+; RV32I-NEXT:    lui a3, %hi(__ashldi3)
+; RV32I-NEXT:    addi a3, a3, %lo(__ashldi3)
+; RV32I-NEXT:    jalr ra, a3, 0
+; RV32I-NEXT:    lw ra, 12(s0)
+; RV32I-NEXT:    jalr zero, ra, 0
+  %1 = shl i64 %a, %b
+  ret i64 %1
+}
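
With the SHL_PARTS/SRL_PARTS/SRA_PARTS nodes marked Expand and no custom
multi-word shift lowering yet, 64-bit shifts on RV32 fall back to the
compiler-rt libcalls checked above. Their conventional signatures, for
reference (illustration only):

    extern "C" long long __ashldi3(long long a, int b); // shl64
    extern "C" long long __lshrdi3(long long a, int b); // lshr64
    extern "C" long long __ashrdi3(long long a, int b); // ashr64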



