[llvm] r347973 - [RISCV] Introduce codegen patterns for instructions introduced in RV64I

Alex Bradbury via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 30 01:38:44 PST 2018


Author: asb
Date: Fri Nov 30 01:38:44 2018
New Revision: 347973

URL: http://llvm.org/viewvc/llvm-project?rev=347973&view=rev
Log:
[RISCV] Introduce codegen patterns for instructions introduced in RV64I

As discussed in the RFC 
<http://lists.llvm.org/pipermail/llvm-dev/2018-October/126690.html>, 64-bit 
RISC-V has i64 as the only legal integer type. This patch introduces patterns 
to support codegen of the new instructions 
introduced in RV64I: addiw, addw, subw, sllw, slliw, srlw, srliw, sraw, 
sraiw, ld, sd.
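
For instance, an i32 add whose result must remain sign-extended is matched
to addw by the following pattern (quoted from the RISCVInstrInfo.td changes
below):

def : Pat<(sext_inreg (add GPR:$rs1, GPR:$rs2), i32),
          (ADDW GPR:$rs1, GPR:$rs2)>;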

Custom selection code is needed for srliw as SimplifyDemandedBits will remove 
lower bits from the mask, meaning the obvious pattern won't work:

def : Pat<(sext_inreg (srl (and GPR:$rs1, 0xffffffff), uimm5:$shamt), i32),
          (SRLIW GPR:$rs1, uimm5:$shamt)>;
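
As a rough illustration (the function name is made up, and the exact DAG
shape depends on legalization), compiling

  define i32 @f(i32 %a) {
    %1 = lshr i32 %a, 8
    ret i32 %1
  }

gives a DAG of the form (srl (and %a, mask), shamt) in which
SimplifyDemandedBits has shrunk the mask from 0xffffffff to 0xffffff00,
since the low 8 bits are discarded by the shift anyway. The custom
selection code therefore accepts any mask that becomes 0xffffffff once
the shifted-out low bits are ORed back in, i.e.
(Mask | maskTrailingOnes<uint64_t>(ShAmt)) == 0xffffffff.
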
This is sufficient to compile and execute all of the GCC torture suite for 
RV64I other than those files using frameaddr or returnaddr intrinsics 
(LegalizeDAG doesn't know how to promote the operands - a future patch 
addresses this).

When promoting i32 sltu/sltiu operands, it would be more efficient to use 
sign-extension rather than zero-extension for RV64. A future patch adds a hook 
to allow this.
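
A sketch of the difference, using sequences that appear in the tests below
(the zero-extension pair is what sltu in alu32.ll currently generates; the
single sext.w is what the future hook would enable). An unsigned comparison
is unaffected as long as both operands receive the same extension:

  # zero-extending an i32 operand: two instructions
  slli a0, a0, 32
  srli a0, a0, 32
  # sign-extending an i32 operand: one instruction
  sext.w a0, a0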

Differential Revision: https://reviews.llvm.org/D52977

Added:
    llvm/trunk/test/CodeGen/RISCV/alu64.ll
    llvm/trunk/test/CodeGen/RISCV/mem64.ll
    llvm/trunk/test/CodeGen/RISCV/rv64i-exhaustive-w-insts.ll
Modified:
    llvm/trunk/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
    llvm/trunk/lib/Target/RISCV/RISCVInstrInfo.td
    llvm/trunk/test/CodeGen/RISCV/alu16.ll
    llvm/trunk/test/CodeGen/RISCV/alu32.ll
    llvm/trunk/test/CodeGen/RISCV/alu8.ll
    llvm/trunk/test/CodeGen/RISCV/sext-zext-trunc.ll

Modified: llvm/trunk/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/RISCV/RISCVISelDAGToDAG.cpp?rev=347973&r1=347972&r2=347973&view=diff
==============================================================================
--- llvm/trunk/lib/Target/RISCV/RISCVISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/RISCV/RISCVISelDAGToDAG.cpp Fri Nov 30 01:38:44 2018
@@ -85,6 +85,17 @@ static SDNode *selectImm(SelectionDAG *C
   return Result;
 }
 
+// Returns true if the Node is an ISD::AND with a constant argument. If so,
+// set Mask to that constant value.
+static bool isConstantMask(SDNode *Node, uint64_t &Mask) {
+  if (Node->getOpcode() == ISD::AND &&
+      Node->getOperand(1).getOpcode() == ISD::Constant) {
+    Mask = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
+    return true;
+  }
+  return false;
+}
+
 void RISCVDAGToDAGISel::Select(SDNode *Node) {
   // If we have a custom node, we have already selected.
   if (Node->isMachineOpcode()) {
@@ -123,6 +134,29 @@ void RISCVDAGToDAGISel::Select(SDNode *N
     ReplaceNode(Node, CurDAG->getMachineNode(RISCV::ADDI, DL, VT, TFI, Imm));
     return;
   }
+  case ISD::SRL: {
+    if (!Subtarget->is64Bit())
+      break;
+    SDValue Op0 = Node->getOperand(0);
+    SDValue Op1 = Node->getOperand(1);
+    uint64_t Mask;
+    // Match (srl (and val, mask), imm) where the result would be a
+    // zero-extended 32-bit integer. i.e. the mask is 0xffffffff or the result
+    // is equivalent to this (SimplifyDemandedBits may have removed lower bits
+    // from the mask that aren't necessary due to the right-shifting).
+    if (Op1.getOpcode() == ISD::Constant &&
+        isConstantMask(Op0.getNode(), Mask)) {
+      uint64_t ShAmt = cast<ConstantSDNode>(Op1.getNode())->getZExtValue();
+
+      if ((Mask | maskTrailingOnes<uint64_t>(ShAmt)) == 0xffffffff) {
+        SDValue ShAmtVal =
+            CurDAG->getTargetConstant(ShAmt, SDLoc(Node), XLenVT);
+        CurDAG->SelectNodeTo(Node, RISCV::SRLIW, XLenVT, Op0.getOperand(0),
+                             ShAmtVal);
+        return;
+      }
+    }
+  }
   }
 
   // Select the default instruction.

Modified: llvm/trunk/lib/Target/RISCV/RISCVInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/RISCV/RISCVInstrInfo.td?rev=347973&r1=347972&r2=347973&view=diff
==============================================================================
--- llvm/trunk/lib/Target/RISCV/RISCVInstrInfo.td (original)
+++ llvm/trunk/lib/Target/RISCV/RISCVInstrInfo.td Fri Nov 30 01:38:44 2018
@@ -211,6 +211,9 @@ def immbottomxlenset : ImmLeaf<XLenVT, [
     return countTrailingOnes<uint64_t>(Imm) >= 6;
   return countTrailingOnes<uint64_t>(Imm) >= 5;
 }]>;
+def immshifti32 : ImmLeaf<XLenVT, [{
+  return countTrailingOnes<uint64_t>(Imm) >= 5;
+}]>;
 
 // Addressing modes.
 // Necessary because a frameindex can't be matched directly in a pattern.
@@ -815,7 +818,7 @@ defm : LdPat<sextloadi8, LB>;
 defm : LdPat<extloadi8, LB>;
 defm : LdPat<sextloadi16, LH>;
 defm : LdPat<extloadi16, LH>;
-defm : LdPat<load, LW>;
+defm : LdPat<load, LW>, Requires<[IsRV32]>;
 defm : LdPat<zextloadi8, LBU>;
 defm : LdPat<zextloadi16, LHU>;
 
@@ -866,6 +869,67 @@ def ADJCALLSTACKUP   : Pseudo<(outs), (i
                               [(CallSeqEnd timm:$amt1, timm:$amt2)]>;
 } // Defs = [X2], Uses = [X2]
 
+/// RV64 patterns
+
+def assertzexti32 : PatFrag<(ops node:$src), (assertzext node:$src), [{
+  return cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32;
+}]>;
+def zexti32 : PatFrags<(ops node:$src),
+                       [(and node:$src, 0xffffffff),
+                        (assertzexti32 node:$src)]>;
+
+let Predicates = [IsRV64] in {
+
+/// sext and zext
+
+def : Pat<(sext_inreg GPR:$rs1, i32), (ADDIW GPR:$rs1, 0)>;
+def : Pat<(and GPR:$rs1, 0xffffffff), (SRLI (SLLI GPR:$rs1, 32), 32)>;
+
+/// ALU operations
+
+def : Pat<(sext_inreg (add GPR:$rs1, GPR:$rs2), i32),
+          (ADDW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(sext_inreg (add GPR:$rs1, simm12:$imm12), i32),
+          (ADDIW GPR:$rs1, simm12:$imm12)>;
+def : Pat<(sext_inreg (sub GPR:$rs1, GPR:$rs2), i32),
+          (SUBW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(sext_inreg (shl GPR:$rs1, uimm5:$shamt), i32),
+          (SLLIW GPR:$rs1, uimm5:$shamt)>;
+// (srl (zexti32 ...), uimm5:$shamt) is matched with custom code due to the
+// need to undo manipulation of the mask value performed by DAGCombine.
+def : Pat<(sra (sext_inreg GPR:$rs1, i32), uimm5:$shamt),
+          (SRAIW GPR:$rs1, uimm5:$shamt)>;
+
+def : Pat<(sext_inreg (shl GPR:$rs1, GPR:$rs2), i32),
+          (SLLW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(sext_inreg (shl GPR:$rs1, (and GPR:$rs2, immshifti32)), i32),
+          (SLLW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(srl (zexti32 GPR:$rs1), GPR:$rs2),
+          (SRLW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(srl (zexti32 GPR:$rs1), (and GPR:$rs2, immshifti32)),
+          (SRLW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(sext_inreg (srl (zexti32 GPR:$rs1), GPR:$rs2), i32),
+          (SRLW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(sext_inreg (srl (zexti32 GPR:$rs1), (and GPR:$rs2, immshifti32)), i32),
+          (SRLW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(sra (sext_inreg GPR:$rs1, i32), GPR:$rs2),
+          (SRAW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(sra (sext_inreg GPR:$rs1, i32), (and GPR:$rs2, immshifti32)),
+          (SRAW GPR:$rs1, GPR:$rs2)>;
+
+/// Loads
+
+defm : LdPat<sextloadi32, LW>;
+defm : LdPat<extloadi32, LW>;
+defm : LdPat<zextloadi32, LWU>;
+defm : LdPat<load, LD>;
+
+/// Stores
+
+defm : StPat<truncstorei32, SW, GPR>;
+defm : StPat<store, SD, GPR>;
+} // Predicates = [IsRV64]
+
 //===----------------------------------------------------------------------===//
 // Standard extensions
 //===----------------------------------------------------------------------===//

Modified: llvm/trunk/test/CodeGen/RISCV/alu16.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/alu16.ll?rev=347973&r1=347972&r2=347973&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/alu16.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/alu16.ll Fri Nov 30 01:38:44 2018
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64I
 
 ; These tests are identical to those in alu32.ll but operate on i16. They check
 ; that legalisation of these non-native types doesn't introduce unnecessary
@@ -11,6 +13,11 @@ define i16 @addi(i16 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi a0, a0, 1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: addi:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a0, a0, 1
+; RV64I-NEXT:    ret
   %1 = add i16 %a, 1
   ret i16 %1
 }
@@ -22,6 +29,13 @@ define i16 @slti(i16 %a) nounwind {
 ; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    slti a0, a0, 2
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: slti:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    slti a0, a0, 2
+; RV64I-NEXT:    ret
   %1 = icmp slt i16 %a, 2
   %2 = zext i1 %1 to i16
   ret i16 %2
@@ -35,6 +49,14 @@ define i16 @sltiu(i16 %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    sltiu a0, a0, 3
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sltiu:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    sltiu a0, a0, 3
+; RV64I-NEXT:    ret
   %1 = icmp ult i16 %a, 3
   %2 = zext i1 %1 to i16
   ret i16 %2
@@ -45,6 +67,11 @@ define i16 @xori(i16 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    xori a0, a0, 4
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: xori:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    xori a0, a0, 4
+; RV64I-NEXT:    ret
   %1 = xor i16 %a, 4
   ret i16 %1
 }
@@ -54,6 +81,11 @@ define i16 @ori(i16 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    ori a0, a0, 5
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: ori:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    ori a0, a0, 5
+; RV64I-NEXT:    ret
   %1 = or i16 %a, 5
   ret i16 %1
 }
@@ -63,6 +95,11 @@ define i16 @andi(i16 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 6
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: andi:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 6
+; RV64I-NEXT:    ret
   %1 = and i16 %a, 6
   ret i16 %1
 }
@@ -72,6 +109,11 @@ define i16 @slli(i16 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slli a0, a0, 7
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: slli:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 7
+; RV64I-NEXT:    ret
   %1 = shl i16 %a, 7
   ret i16 %1
 }
@@ -84,6 +126,14 @@ define i16 @srli(i16 %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    srli a0, a0, 6
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: srli:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw a1, a1, -64
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    srli a0, a0, 6
+; RV64I-NEXT:    ret
   %1 = lshr i16 %a, 6
   ret i16 %1
 }
@@ -94,6 +144,12 @@ define i16 @srai(i16 %a) nounwind {
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srai a0, a0, 25
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: srai:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srai a0, a0, 57
+; RV64I-NEXT:    ret
   %1 = ashr i16 %a, 9
   ret i16 %1
 }
@@ -104,6 +160,11 @@ define i16 @add(i16 %a, i16 %b) nounwind
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: add:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = add i16 %a, %b
   ret i16 %1
 }
@@ -113,6 +174,11 @@ define i16 @sub(i16 %a, i16 %b) nounwind
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sub a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sub:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = sub i16 %a, %b
   ret i16 %1
 }
@@ -122,6 +188,11 @@ define i16 @sll(i16 %a, i16 %b) nounwind
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sll a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sll:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = shl i16 %a, %b
   ret i16 %1
 }
@@ -135,6 +206,15 @@ define i16 @slt(i16 %a, i16 %b) nounwind
 ; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    slt a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: slt:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    slt a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = icmp slt i16 %a, %b
   %2 = zext i1 %1 to i16
   ret i16 %2
@@ -149,6 +229,15 @@ define i16 @sltu(i16 %a, i16 %b) nounwin
 ; RV32I-NEXT:    and a0, a0, a2
 ; RV32I-NEXT:    sltu a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sltu:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a2, 16
+; RV64I-NEXT:    addiw a2, a2, -1
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    sltu a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = icmp ult i16 %a, %b
   %2 = zext i1 %1 to i16
   ret i16 %2
@@ -159,6 +248,11 @@ define i16 @xor(i16 %a, i16 %b) nounwind
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    xor a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: xor:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = xor i16 %a, %b
   ret i16 %1
 }
@@ -171,6 +265,14 @@ define i16 @srl(i16 %a, i16 %b) nounwind
 ; RV32I-NEXT:    and a0, a0, a2
 ; RV32I-NEXT:    srl a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: srl:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a2, 16
+; RV64I-NEXT:    addiw a2, a2, -1
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = lshr i16 %a, %b
   ret i16 %1
 }
@@ -182,6 +284,13 @@ define i16 @sra(i16 %a, i16 %b) nounwind
 ; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    sra a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sra:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = ashr i16 %a, %b
   ret i16 %1
 }
@@ -191,6 +300,11 @@ define i16 @or(i16 %a, i16 %b) nounwind
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: or:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = or i16 %a, %b
   ret i16 %1
 }
@@ -200,6 +314,11 @@ define i16 @and(i16 %a, i16 %b) nounwind
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: and:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = and i16 %a, %b
   ret i16 %1
 }

Modified: llvm/trunk/test/CodeGen/RISCV/alu32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/alu32.ll?rev=347973&r1=347972&r2=347973&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/alu32.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/alu32.ll Fri Nov 30 01:38:44 2018
@@ -1,18 +1,28 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64I
 
 ; These tests are each targeted at a particular RISC-V ALU instruction. Other
 ; files in this folder exercise LLVM IR instructions that don't directly match a
 ; RISC-V instruction
 
-; Register-immediate instructions
+; Register-immediate instructions.
+
+; TODO: Sign-extension would also work when promoting the operands of
+; sltu/sltiu on RV64 and is cheaper than zero-extension (1 instruction vs 2).
 
 define i32 @addi(i32 %a) nounwind {
 ; RV32I-LABEL: addi:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi a0, a0, 1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: addi:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a0, a0, 1
+; RV64I-NEXT:    ret
   %1 = add i32 %a, 1
   ret i32 %1
 }
@@ -22,6 +32,12 @@ define i32 @slti(i32 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slti a0, a0, 2
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: slti:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    slti a0, a0, 2
+; RV64I-NEXT:    ret
   %1 = icmp slt i32 %a, 2
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -32,6 +48,13 @@ define i32 @sltiu(i32 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sltiu a0, a0, 3
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sltiu:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sltiu a0, a0, 3
+; RV64I-NEXT:    ret
   %1 = icmp ult i32 %a, 3
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -42,6 +65,11 @@ define i32 @xori(i32 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    xori a0, a0, 4
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: xori:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    xori a0, a0, 4
+; RV64I-NEXT:    ret
   %1 = xor i32 %a, 4
   ret i32 %1
 }
@@ -51,6 +79,11 @@ define i32 @ori(i32 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    ori a0, a0, 5
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: ori:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    ori a0, a0, 5
+; RV64I-NEXT:    ret
   %1 = or i32 %a, 5
   ret i32 %1
 }
@@ -60,6 +93,11 @@ define i32 @andi(i32 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 6
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: andi:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 6
+; RV64I-NEXT:    ret
   %1 = and i32 %a, 6
   ret i32 %1
 }
@@ -69,6 +107,11 @@ define i32 @slli(i32 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slli a0, a0, 7
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: slli:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 7
+; RV64I-NEXT:    ret
   %1 = shl i32 %a, 7
   ret i32 %1
 }
@@ -78,6 +121,11 @@ define i32 @srli(i32 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    srli a0, a0, 8
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: srli:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srliw a0, a0, 8
+; RV64I-NEXT:    ret
   %1 = lshr i32 %a, 8
   ret i32 %1
 }
@@ -87,6 +135,11 @@ define i32 @srai(i32 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    srai a0, a0, 9
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: srai:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sraiw a0, a0, 9
+; RV64I-NEXT:    ret
   %1 = ashr i32 %a, 9
   ret i32 %1
 }
@@ -98,6 +151,11 @@ define i32 @add(i32 %a, i32 %b) nounwind
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: add:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = add i32 %a, %b
   ret i32 %1
 }
@@ -107,6 +165,11 @@ define i32 @sub(i32 %a, i32 %b) nounwind
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sub a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sub:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = sub i32 %a, %b
   ret i32 %1
 }
@@ -116,6 +179,11 @@ define i32 @sll(i32 %a, i32 %b) nounwind
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sll a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sll:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = shl i32 %a, %b
   ret i32 %1
 }
@@ -125,6 +193,13 @@ define i32 @slt(i32 %a, i32 %b) nounwind
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slt a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: slt:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a1, a1
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    slt a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = icmp slt i32 %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -135,6 +210,15 @@ define i32 @sltu(i32 %a, i32 %b) nounwin
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sltu a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sltu:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a1, 32
+; RV64I-NEXT:    srli a1, a1, 32
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sltu a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = icmp ult i32 %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -145,6 +229,11 @@ define i32 @xor(i32 %a, i32 %b) nounwind
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    xor a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: xor:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = xor i32 %a, %b
   ret i32 %1
 }
@@ -154,6 +243,11 @@ define i32 @srl(i32 %a, i32 %b) nounwind
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    srl a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: srl:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = lshr i32 %a, %b
   ret i32 %1
 }
@@ -163,6 +257,11 @@ define i32 @sra(i32 %a, i32 %b) nounwind
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sra a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sra:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sraw a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = ashr i32 %a, %b
   ret i32 %1
 }
@@ -172,6 +271,11 @@ define i32 @or(i32 %a, i32 %b) nounwind
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: or:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = or i32 %a, %b
   ret i32 %1
 }
@@ -181,6 +285,11 @@ define i32 @and(i32 %a, i32 %b) nounwind
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: and:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = and i32 %a, %b
   ret i32 %1
 }

Added: llvm/trunk/test/CodeGen/RISCV/alu64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/alu64.ll?rev=347973&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/alu64.ll (added)
+++ llvm/trunk/test/CodeGen/RISCV/alu64.ll Fri Nov 30 01:38:44 2018
@@ -0,0 +1,488 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64I
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+
+; These tests are each targeted at a particular RISC-V ALU instruction. Other
+; files in this folder exercise LLVM IR instructions that don't directly match a
+; RISC-V instruction. This file contains tests for the instructions common
+; between RV32I and RV64I as well as the *W instructions introduced in RV64I.
+
+; Register-immediate instructions
+
+define i64 @addi(i64 %a) nounwind {
+; RV64I-LABEL: addi:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a0, a0, 1
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: addi:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a2, a0, 1
+; RV32I-NEXT:    sltu a0, a2, a0
+; RV32I-NEXT:    add a1, a1, a0
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    ret
+  %1 = add i64 %a, 1
+  ret i64 %1
+}
+
+define i64 @slti(i64 %a) nounwind {
+; RV64I-LABEL: slti:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slti a0, a0, 2
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: slti:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    beqz a1, .LBB1_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    slti a0, a1, 0
+; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB1_2:
+; RV32I-NEXT:    sltiu a0, a0, 2
+; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    ret
+  %1 = icmp slt i64 %a, 2
+  %2 = zext i1 %1 to i64
+  ret i64 %2
+}
+
+define i64 @sltiu(i64 %a) nounwind {
+; RV64I-LABEL: sltiu:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sltiu a0, a0, 3
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: sltiu:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    beqz a1, .LBB2_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB2_2:
+; RV32I-NEXT:    sltiu a0, a0, 3
+; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    ret
+  %1 = icmp ult i64 %a, 3
+  %2 = zext i1 %1 to i64
+  ret i64 %2
+}
+
+define i64 @xori(i64 %a) nounwind {
+; RV64I-LABEL: xori:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    xori a0, a0, 4
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: xori:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    xori a0, a0, 4
+; RV32I-NEXT:    ret
+  %1 = xor i64 %a, 4
+  ret i64 %1
+}
+
+define i64 @ori(i64 %a) nounwind {
+; RV64I-LABEL: ori:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    ori a0, a0, 5
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: ori:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    ori a0, a0, 5
+; RV32I-NEXT:    ret
+  %1 = or i64 %a, 5
+  ret i64 %1
+}
+
+define i64 @andi(i64 %a) nounwind {
+; RV64I-LABEL: andi:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 6
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: andi:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a0, a0, 6
+; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    ret
+  %1 = and i64 %a, 6
+  ret i64 %1
+}
+
+define i64 @slli(i64 %a) nounwind {
+; RV64I-LABEL: slli:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 7
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: slli:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a1, 7
+; RV32I-NEXT:    srli a2, a0, 25
+; RV32I-NEXT:    or a1, a1, a2
+; RV32I-NEXT:    slli a0, a0, 7
+; RV32I-NEXT:    ret
+  %1 = shl i64 %a, 7
+  ret i64 %1
+}
+
+define i64 @srli(i64 %a) nounwind {
+; RV64I-LABEL: srli:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a0, a0, 8
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: srli:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a0, a0, 8
+; RV32I-NEXT:    slli a2, a1, 24
+; RV32I-NEXT:    or a0, a0, a2
+; RV32I-NEXT:    srli a1, a1, 8
+; RV32I-NEXT:    ret
+  %1 = lshr i64 %a, 8
+  ret i64 %1
+}
+
+define i64 @srai(i64 %a) nounwind {
+; RV64I-LABEL: srai:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srai a0, a0, 9
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: srai:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a0, a0, 9
+; RV32I-NEXT:    slli a2, a1, 23
+; RV32I-NEXT:    or a0, a0, a2
+; RV32I-NEXT:    srai a1, a1, 9
+; RV32I-NEXT:    ret
+  %1 = ashr i64 %a, 9
+  ret i64 %1
+}
+
+; Register-register instructions
+
+define i64 @add(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: add:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: add:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    add a1, a1, a3
+; RV32I-NEXT:    add a2, a0, a2
+; RV32I-NEXT:    sltu a0, a2, a0
+; RV32I-NEXT:    add a1, a1, a0
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    ret
+  %1 = add i64 %a, %b
+  ret i64 %1
+}
+
+define i64 @sub(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: sub:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: sub:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sltu a3, a0, a2
+; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    ret
+  %1 = sub i64 %a, %b
+  ret i64 %1
+}
+
+define i64 @sll(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: sll:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: sll:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    call __ashldi3
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = shl i64 %a, %b
+  ret i64 %1
+}
+
+define i64 @slt(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: slt:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slt a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: slt:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    beq a1, a3, .LBB12_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    slt a0, a1, a3
+; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB12_2:
+; RV32I-NEXT:    sltu a0, a0, a2
+; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    ret
+  %1 = icmp slt i64 %a, %b
+  %2 = zext i1 %1 to i64
+  ret i64 %2
+}
+
+define i64 @sltu(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: sltu:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sltu a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: sltu:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    beq a1, a3, .LBB13_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu a0, a1, a3
+; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB13_2:
+; RV32I-NEXT:    sltu a0, a0, a2
+; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    ret
+  %1 = icmp ult i64 %a, %b
+  %2 = zext i1 %1 to i64
+  ret i64 %2
+}
+
+define i64 @xor(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: xor:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: xor:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    xor a0, a0, a2
+; RV32I-NEXT:    xor a1, a1, a3
+; RV32I-NEXT:    ret
+  %1 = xor i64 %a, %b
+  ret i64 %1
+}
+
+define i64 @srl(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: srl:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: srl:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    call __lshrdi3
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = lshr i64 %a, %b
+  ret i64 %1
+}
+
+define i64 @sra(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: sra:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: sra:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    call __ashrdi3
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+  %1 = ashr i64 %a, %b
+  ret i64 %1
+}
+
+define i64 @or(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: or:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: or:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    or a0, a0, a2
+; RV32I-NEXT:    or a1, a1, a3
+; RV32I-NEXT:    ret
+  %1 = or i64 %a, %b
+  ret i64 %1
+}
+
+define i64 @and(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: and:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: and:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    and a1, a1, a3
+; RV32I-NEXT:    ret
+  %1 = and i64 %a, %b
+  ret i64 %1
+}
+
+; RV64I-only instructions
+
+define signext i32 @addiw(i32 signext %a) {
+; RV64I-LABEL: addiw:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addiw a0, a0, 123
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: addiw:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a0, a0, 123
+; RV32I-NEXT:    ret
+  %1 = add i32 %a, 123
+  ret i32 %1
+}
+
+define signext i32 @slliw(i32 signext %a) {
+; RV64I-LABEL: slliw:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slliw a0, a0, 17
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: slliw:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 17
+; RV32I-NEXT:    ret
+  %1 = shl i32 %a, 17
+  ret i32 %1
+}
+
+define signext i32 @srliw(i32 %a) {
+; RV64I-LABEL: srliw:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srliw a0, a0, 8
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: srliw:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a0, a0, 8
+; RV32I-NEXT:    ret
+  %1 = lshr i32 %a, 8
+  ret i32 %1
+}
+
+define signext i32 @sraiw(i32 %a) {
+; RV64I-LABEL: sraiw:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sraiw a0, a0, 9
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: sraiw:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srai a0, a0, 9
+; RV32I-NEXT:    ret
+  %1 = ashr i32 %a, 9
+  ret i32 %1
+}
+
+define signext i32 @sextw(i32 zeroext %a) {
+; RV64I-LABEL: sextw:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: sextw:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    ret
+  ret i32 %a
+}
+
+define signext i32 @addw(i32 signext %a, i32 signext %b) {
+; RV64I-LABEL: addw:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: addw:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @subw(i32 signext %a, i32 signext %b) {
+; RV64I-LABEL: subw:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: subw:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sllw(i32 signext %a, i32 zeroext %b) {
+; RV64I-LABEL: sllw:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: sllw:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sll a0, a0, a1
+; RV32I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @srlw(i32 signext %a, i32 zeroext %b) {
+; RV64I-LABEL: srlw:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: srlw:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srl a0, a0, a1
+; RV32I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sraw(i64 %a, i32 zeroext %b) {
+; RV64I-LABEL: sraw:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sraw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32I-LABEL: sraw:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sra a0, a0, a2
+; RV32I-NEXT:    ret
+  %1 = trunc i64 %a to i32
+  %2 = ashr i32 %1, %b
+  ret i32 %2
+}

Modified: llvm/trunk/test/CodeGen/RISCV/alu8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/alu8.ll?rev=347973&r1=347972&r2=347973&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/alu8.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/alu8.ll Fri Nov 30 01:38:44 2018
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64I
 
 ; These tests are identical to those in alu32.ll but operate on i8. They check
 ; that legalisation of these non-native types doesn't introduce unnecessary
@@ -11,6 +13,11 @@ define i8 @addi(i8 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi a0, a0, 1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: addi:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a0, a0, 1
+; RV64I-NEXT:    ret
   %1 = add i8 %a, 1
   ret i8 %1
 }
@@ -22,6 +29,13 @@ define i8 @slti(i8 %a) nounwind {
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    slti a0, a0, 2
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: slti:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    slti a0, a0, 2
+; RV64I-NEXT:    ret
   %1 = icmp slt i8 %a, 2
   %2 = zext i1 %1 to i8
   ret i8 %2
@@ -33,6 +47,12 @@ define i8 @sltiu(i8 %a) nounwind {
 ; RV32I-NEXT:    andi a0, a0, 255
 ; RV32I-NEXT:    sltiu a0, a0, 3
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sltiu:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    sltiu a0, a0, 3
+; RV64I-NEXT:    ret
   %1 = icmp ult i8 %a, 3
   %2 = zext i1 %1 to i8
   ret i8 %2
@@ -43,6 +63,11 @@ define i8 @xori(i8 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    xori a0, a0, 4
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: xori:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    xori a0, a0, 4
+; RV64I-NEXT:    ret
   %1 = xor i8 %a, 4
   ret i8 %1
 }
@@ -52,6 +77,11 @@ define i8 @ori(i8 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    ori a0, a0, 5
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: ori:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    ori a0, a0, 5
+; RV64I-NEXT:    ret
   %1 = or i8 %a, 5
   ret i8 %1
 }
@@ -61,6 +91,11 @@ define i8 @andi(i8 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 6
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: andi:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 6
+; RV64I-NEXT:    ret
   %1 = and i8 %a, 6
   ret i8 %1
 }
@@ -70,6 +105,11 @@ define i8 @slli(i8 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slli a0, a0, 7
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: slli:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 7
+; RV64I-NEXT:    ret
   %1 = shl i8 %a, 7
   ret i8 %1
 }
@@ -80,6 +120,12 @@ define i8 @srli(i8 %a) nounwind {
 ; RV32I-NEXT:    andi a0, a0, 192
 ; RV32I-NEXT:    srli a0, a0, 6
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: srli:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 192
+; RV64I-NEXT:    srli a0, a0, 6
+; RV64I-NEXT:    ret
   %1 = lshr i8 %a, 6
   ret i8 %1
 }
@@ -90,6 +136,12 @@ define i8 @srai(i8 %a) nounwind {
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 29
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: srai:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    srai a0, a0, 61
+; RV64I-NEXT:    ret
   %1 = ashr i8 %a, 5
   ret i8 %1
 }
@@ -100,6 +152,11 @@ define i8 @add(i8 %a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: add:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = add i8 %a, %b
   ret i8 %1
 }
@@ -109,6 +166,11 @@ define i8 @sub(i8 %a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sub a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sub:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = sub i8 %a, %b
   ret i8 %1
 }
@@ -118,6 +180,11 @@ define i8 @sll(i8 %a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sll a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sll:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = shl i8 %a, %b
   ret i8 %1
 }
@@ -131,6 +198,15 @@ define i8 @slt(i8 %a, i8 %b) nounwind {
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    slt a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: slt:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    slt a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = icmp slt i8 %a, %b
   %2 = zext i1 %1 to i8
   ret i8 %2
@@ -143,6 +219,13 @@ define i8 @sltu(i8 %a, i8 %b) nounwind {
 ; RV32I-NEXT:    andi a0, a0, 255
 ; RV32I-NEXT:    sltu a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sltu:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a1, a1, 255
+; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    sltu a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = icmp ult i8 %a, %b
   %2 = zext i1 %1 to i8
   ret i8 %2
@@ -153,6 +236,11 @@ define i8 @xor(i8 %a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    xor a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: xor:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = xor i8 %a, %b
   ret i8 %1
 }
@@ -163,6 +251,12 @@ define i8 @srl(i8 %a, i8 %b) nounwind {
 ; RV32I-NEXT:    andi a0, a0, 255
 ; RV32I-NEXT:    srl a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: srl:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = lshr i8 %a, %b
   ret i8 %1
 }
@@ -174,6 +268,13 @@ define i8 @sra(i8 %a, i8 %b) nounwind {
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    sra a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sra:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = ashr i8 %a, %b
   ret i8 %1
 }
@@ -183,6 +284,11 @@ define i8 @or(i8 %a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: or:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = or i8 %a, %b
   ret i8 %1
 }
@@ -192,6 +298,11 @@ define i8 @and(i8 %a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: and:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = and i8 %a, %b
   ret i8 %1
 }

Added: llvm/trunk/test/CodeGen/RISCV/mem64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/mem64.ll?rev=347973&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/mem64.ll (added)
+++ llvm/trunk/test/CodeGen/RISCV/mem64.ll Fri Nov 30 01:38:44 2018
@@ -0,0 +1,226 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
+
+; Check indexed and unindexed, sext, zext and anyext loads
+
+define i64 @lb(i8 *%a) nounwind {
+; RV64I-LABEL: lb:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lb a1, 0(a0)
+; RV64I-NEXT:    lb a0, 1(a0)
+; RV64I-NEXT:    ret
+  %1 = getelementptr i8, i8* %a, i32 1
+  %2 = load i8, i8* %1
+  %3 = sext i8 %2 to i64
+  ; the unused load will produce an anyext for selection
+  %4 = load volatile i8, i8* %a
+  ret i64 %3
+}
+
+define i64 @lh(i16 *%a) nounwind {
+; RV64I-LABEL: lh:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lh a1, 0(a0)
+; RV64I-NEXT:    lh a0, 4(a0)
+; RV64I-NEXT:    ret
+  %1 = getelementptr i16, i16* %a, i32 2
+  %2 = load i16, i16* %1
+  %3 = sext i16 %2 to i64
+  ; the unused load will produce an anyext for selection
+  %4 = load volatile i16, i16* %a
+  ret i64 %3
+}
+
+define i64 @lw(i32 *%a) nounwind {
+; RV64I-LABEL: lw:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lw a1, 0(a0)
+; RV64I-NEXT:    lw a0, 12(a0)
+; RV64I-NEXT:    ret
+  %1 = getelementptr i32, i32* %a, i32 3
+  %2 = load i32, i32* %1
+  %3 = sext i32 %2 to i64
+  ; the unused load will produce an anyext for selection
+  %4 = load volatile i32, i32* %a
+  ret i64 %3
+}
+
+define i64 @lbu(i8 *%a) nounwind {
+; RV64I-LABEL: lbu:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lbu a1, 0(a0)
+; RV64I-NEXT:    lbu a0, 4(a0)
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = getelementptr i8, i8* %a, i32 4
+  %2 = load i8, i8* %1
+  %3 = zext i8 %2 to i64
+  %4 = load volatile i8, i8* %a
+  %5 = zext i8 %4 to i64
+  %6 = add i64 %3, %5
+  ret i64 %6
+}
+
+define i64 @lhu(i16 *%a) nounwind {
+; RV64I-LABEL: lhu:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lhu a1, 0(a0)
+; RV64I-NEXT:    lhu a0, 10(a0)
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = getelementptr i16, i16* %a, i32 5
+  %2 = load i16, i16* %1
+  %3 = zext i16 %2 to i64
+  %4 = load volatile i16, i16* %a
+  %5 = zext i16 %4 to i64
+  %6 = add i64 %3, %5
+  ret i64 %6
+}
+
+define i64 @lwu(i32 *%a) nounwind {
+; RV64I-LABEL: lwu:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lwu a1, 0(a0)
+; RV64I-NEXT:    lwu a0, 24(a0)
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = getelementptr i32, i32* %a, i32 6
+  %2 = load i32, i32* %1
+  %3 = zext i32 %2 to i64
+  %4 = load volatile i32, i32* %a
+  %5 = zext i32 %4 to i64
+  %6 = add i64 %3, %5
+  ret i64 %6
+}
+
+; Check indexed and unindexed stores
+
+define void @sb(i8 *%a, i8 %b) nounwind {
+; RV64I-LABEL: sb:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sb a1, 7(a0)
+; RV64I-NEXT:    sb a1, 0(a0)
+; RV64I-NEXT:    ret
+  store i8 %b, i8* %a
+  %1 = getelementptr i8, i8* %a, i32 7
+  store i8 %b, i8* %1
+  ret void
+}
+
+define void @sh(i16 *%a, i16 %b) nounwind {
+; RV64I-LABEL: sh:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sh a1, 16(a0)
+; RV64I-NEXT:    sh a1, 0(a0)
+; RV64I-NEXT:    ret
+  store i16 %b, i16* %a
+  %1 = getelementptr i16, i16* %a, i32 8
+  store i16 %b, i16* %1
+  ret void
+}
+
+define void @sw(i32 *%a, i32 %b) nounwind {
+; RV64I-LABEL: sw:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sw a1, 36(a0)
+; RV64I-NEXT:    sw a1, 0(a0)
+; RV64I-NEXT:    ret
+  store i32 %b, i32* %a
+  %1 = getelementptr i32, i32* %a, i32 9
+  store i32 %b, i32* %1
+  ret void
+}
+
+; 64-bit loads and stores
+
+define i64 @ld(i64 *%a) nounwind {
+; RV64I-LABEL: ld:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    ld a1, 0(a0)
+; RV64I-NEXT:    ld a0, 80(a0)
+; RV64I-NEXT:    ret
+  %1 = getelementptr i64, i64* %a, i32 10
+  %2 = load i64, i64* %1
+  %3 = load volatile i64, i64* %a
+  ret i64 %2
+}
+
+define void @sd(i64 *%a, i64 %b) nounwind {
+; RV64I-LABEL: sd:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sd a1, 88(a0)
+; RV64I-NEXT:    sd a1, 0(a0)
+; RV64I-NEXT:    ret
+  store i64 %b, i64* %a
+  %1 = getelementptr i64, i64* %a, i32 11
+  store i64 %b, i64* %1
+  ret void
+}
+
+; Check load and store to an i1 location
+define i64 @load_sext_zext_anyext_i1(i1 *%a) nounwind {
+; RV64I-LABEL: load_sext_zext_anyext_i1:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lb a1, 0(a0)
+; RV64I-NEXT:    lbu a1, 1(a0)
+; RV64I-NEXT:    lbu a0, 2(a0)
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+  ; sextload i1
+  %1 = getelementptr i1, i1* %a, i32 1
+  %2 = load i1, i1* %1
+  %3 = sext i1 %2 to i64
+  ; zextload i1
+  %4 = getelementptr i1, i1* %a, i32 2
+  %5 = load i1, i1* %4
+  %6 = zext i1 %5 to i64
+  %7 = add i64 %3, %6
+  ; extload i1 (anyext). Produced as the load is unused.
+  %8 = load volatile i1, i1* %a
+  ret i64 %7
+}
+
+define i16 @load_sext_zext_anyext_i1_i16(i1 *%a) nounwind {
+; RV64I-LABEL: load_sext_zext_anyext_i1_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lb a1, 0(a0)
+; RV64I-NEXT:    lbu a1, 1(a0)
+; RV64I-NEXT:    lbu a0, 2(a0)
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+  ; sextload i1
+  %1 = getelementptr i1, i1* %a, i32 1
+  %2 = load i1, i1* %1
+  %3 = sext i1 %2 to i16
+  ; zextload i1
+  %4 = getelementptr i1, i1* %a, i32 2
+  %5 = load i1, i1* %4
+  %6 = zext i1 %5 to i16
+  %7 = add i16 %3, %6
+  ; extload i1 (anyext). Produced as the load is unused.
+  %8 = load volatile i1, i1* %a
+  ret i16 %7
+}
+
+; Check load and store to a global
+@G = global i64 0
+
+define i64 @ld_sd_global(i64 %a) nounwind {
+; RV64I-LABEL: ld_sd_global:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a2, %hi(G)
+; RV64I-NEXT:    ld a1, %lo(G)(a2)
+; RV64I-NEXT:    sd a0, %lo(G)(a2)
+; RV64I-NEXT:    addi a2, a2, %lo(G)
+; RV64I-NEXT:    ld a3, 72(a2)
+; RV64I-NEXT:    sd a0, 72(a2)
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:    ret
+  %1 = load volatile i64, i64* @G
+  store i64 %a, i64* @G
+  %2 = getelementptr i64, i64* @G, i64 9
+  %3 = load volatile i64, i64* %2
+  store i64 %a, i64* %2
+  ret i64 %1
+}

Added: llvm/trunk/test/CodeGen/RISCV/rv64i-exhaustive-w-insts.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/rv64i-exhaustive-w-insts.ll?rev=347973&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/rv64i-exhaustive-w-insts.ll (added)
+++ llvm/trunk/test/CodeGen/RISCV/rv64i-exhaustive-w-insts.ll Fri Nov 30 01:38:44 2018
@@ -0,0 +1,1696 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64I
+
+; The patterns for the 'W' suffixed RV64I instructions have the potential of
+; missing cases. This file checks all the variants of
+; sign-extended/zero-extended/any-extended inputs and outputs.
+
+; The 64-bit add instruction can safely be used when the result is anyext.
+
+define i32 @aext_addw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64I-LABEL: aext_addw_aext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_addw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64I-LABEL: aext_addw_aext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_addw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: aext_addw_aext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_addw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64I-LABEL: aext_addw_sext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_addw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: aext_addw_sext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_addw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: aext_addw_sext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_addw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64I-LABEL: aext_addw_zext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_addw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: aext_addw_zext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_addw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: aext_addw_zext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+; Always select addw when a signext result is required.
+
+define signext i32 @sext_addw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64I-LABEL: sext_addw_aext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_addw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64I-LABEL: sext_addw_aext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_addw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: sext_addw_aext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_addw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64I-LABEL: sext_addw_sext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_addw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: sext_addw_sext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_addw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: sext_addw_sext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_addw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64I-LABEL: sext_addw_zext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_addw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: sext_addw_zext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_addw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: sext_addw_zext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+; 64-bit add followed by zero-extension is a safe option when a zeroext result
+; is required.
+
+define zeroext i32 @zext_addw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64I-LABEL: zext_addw_aext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_addw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64I-LABEL: zext_addw_aext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_addw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: zext_addw_aext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_addw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64I-LABEL: zext_addw_sext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_addw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: zext_addw_sext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_addw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: zext_addw_sext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_addw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64I-LABEL: zext_addw_zext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_addw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: zext_addw_zext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_addw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: zext_addw_zext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, %b
+  ret i32 %1
+}
+
+; 64-bit sub is safe for an anyext result.
+
+define i32 @aext_subw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64I-LABEL: aext_subw_aext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_subw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64I-LABEL: aext_subw_aext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_subw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: aext_subw_aext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_subw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64I-LABEL: aext_subw_sext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_subw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: aext_subw_sext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_subw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: aext_subw_sext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_subw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64I-LABEL: aext_subw_zext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_subw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: aext_subw_zext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_subw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: aext_subw_zext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+; Always select subw for a signext result.
+
+define signext i32 @sext_subw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64I-LABEL: sext_subw_aext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_subw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64I-LABEL: sext_subw_aext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_subw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: sext_subw_aext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_subw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64I-LABEL: sext_subw_sext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_subw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: sext_subw_sext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_subw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: sext_subw_sext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_subw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64I-LABEL: sext_subw_zext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_subw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: sext_subw_zext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_subw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: sext_subw_zext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+; 64-bit sub followed by zero-extension is safe for a zeroext result.
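+;
+; RV64I has no single zero-extend-word instruction, so the i32 -> i64
+; zero-extension of the result is materialized as a shift pair:
+;   slli a0, a0, 32   ; move the low word up, dropping the old upper bits
+;   srli a0, a0, 32   ; shift back, filling the upper 32 bits with zeros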
+
+define zeroext i32 @zext_subw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64I-LABEL: zext_subw_aext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_subw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64I-LABEL: zext_subw_aext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_subw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: zext_subw_aext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_subw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64I-LABEL: zext_subw_sext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_subw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: zext_subw_sext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_subw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: zext_subw_sext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_subw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64I-LABEL: zext_subw_zext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_subw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: zext_subw_zext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_subw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: zext_subw_zext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = sub i32 %a, %b
+  ret i32 %1
+}
+
+; 64-bit sll is a safe choice for an anyext result.
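+;
+; The shift amount of a (non-poison) i32 shl is at most 31 and sll only reads
+; the low six bits of rs2, so the low 32 bits of the 64-bit result match the
+; 32-bit shift; the upper bits are don't-care for an anyext return value.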
+
+define i32 @aext_sllw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64I-LABEL: aext_sllw_aext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_sllw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64I-LABEL: aext_sllw_aext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_sllw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: aext_sllw_aext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_sllw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64I-LABEL: aext_sllw_sext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_sllw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: aext_sllw_sext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_sllw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: aext_sllw_sext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_sllw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64I-LABEL: aext_sllw_zext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_sllw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: aext_sllw_zext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_sllw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: aext_sllw_zext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+; Select sllw for all cases with a signext result.
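+;
+; Unlike add/sub, extended inputs alone don't make a 64-bit sll acceptable
+; here: shifting can move set bits into and across bit 31, so the result
+; would still need sign-extension; sllw performs the shift and the
+; sign-extension in one instruction.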
+
+define signext i32 @sext_sllw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64I-LABEL: sext_sllw_aext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_sllw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64I-LABEL: sext_sllw_aext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_sllw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: sext_sllw_aext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_sllw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64I-LABEL: sext_sllw_sext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_sllw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: sext_sllw_sext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_sllw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: sext_sllw_sext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_sllw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64I-LABEL: sext_sllw_zext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_sllw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: sext_sllw_zext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_sllw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: sext_sllw_zext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+; 64-bit sll followed by zero-extension is used for a zeroext result.
+
+define zeroext i32 @zext_sllw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64I-LABEL: zext_sllw_aext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_sllw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64I-LABEL: zext_sllw_aext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_sllw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: zext_sllw_aext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_sllw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64I-LABEL: zext_sllw_sext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_sllw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: zext_sllw_sext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_sllw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: zext_sllw_sext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_sllw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64I-LABEL: zext_sllw_zext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_sllw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: zext_sllw_zext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_sllw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: zext_sllw_zext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, %b
+  ret i32 %1
+}
+
+; srlw must always be selected for 32-bit lshr with a variable shift amount.
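+;
+; A 64-bit srl would shift whatever sits in bits 63:32 of the register down
+; into the 32-bit result, so the plain srl is only safe when the input is
+; known to be zero-extended; with a variable shift amount the patterns here
+; select srlw unconditionally.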
+
+define i32 @aext_srlw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64I-LABEL: aext_srlw_aext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_srlw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64I-LABEL: aext_srlw_aext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_srlw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: aext_srlw_aext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_srlw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64I-LABEL: aext_srlw_sext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_srlw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: aext_srlw_sext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_srlw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: aext_srlw_sext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_srlw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64I-LABEL: aext_srlw_zext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_srlw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: aext_srlw_zext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_srlw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: aext_srlw_zext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_srlw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64I-LABEL: sext_srlw_aext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_srlw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64I-LABEL: sext_srlw_aext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_srlw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: sext_srlw_aext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_srlw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64I-LABEL: sext_srlw_sext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_srlw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: sext_srlw_sext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_srlw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: sext_srlw_sext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_srlw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64I-LABEL: sext_srlw_zext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_srlw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: sext_srlw_zext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_srlw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: sext_srlw_zext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_srlw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64I-LABEL: zext_srlw_aext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_srlw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64I-LABEL: zext_srlw_aext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_srlw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: zext_srlw_aext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_srlw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64I-LABEL: zext_srlw_sext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_srlw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: zext_srlw_sext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_srlw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: zext_srlw_sext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_srlw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64I-LABEL: zext_srlw_zext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_srlw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: zext_srlw_zext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_srlw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: zext_srlw_zext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, %b
+  ret i32 %1
+}
+
+; sraw must be selected if the first operand is not sign-extended.
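+;
+; With an unknown upper half, a 64-bit sra would shift arbitrary bits into
+; the 32-bit result. When rs1 is sign-extended, bits 63:31 are all copies of
+; the sign bit, so sra by an amount below 32 produces exactly the
+; sign-extension of the 32-bit ashr and is selected instead.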
+
+define i32 @aext_sraw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64I-LABEL: aext_sraw_aext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sraw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_sraw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64I-LABEL: aext_sraw_aext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sraw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_sraw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: aext_sraw_aext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sraw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_sraw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64I-LABEL: aext_sraw_sext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_sraw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: aext_sraw_sext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_sraw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: aext_sraw_sext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_sraw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64I-LABEL: aext_sraw_zext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sraw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_sraw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: aext_sraw_zext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sraw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @aext_sraw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: aext_sraw_zext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sraw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_sraw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64I-LABEL: sext_sraw_aext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sraw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_sraw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64I-LABEL: sext_sraw_aext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sraw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_sraw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: sext_sraw_aext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sraw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_sraw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64I-LABEL: sext_sraw_sext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_sraw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: sext_sraw_sext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_sraw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: sext_sraw_sext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_sraw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64I-LABEL: sext_sraw_zext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sraw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_sraw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: sext_sraw_zext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sraw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, %b
+  ret i32 %1
+}
+
+define signext i32 @sext_sraw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: sext_sraw_zext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sraw a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_sraw_aext_aext(i32 %a, i32 %b) nounwind {
+; RV64I-LABEL: zext_sraw_aext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sraw a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_sraw_aext_sext(i32 %a, i32 signext %b) nounwind {
+; RV64I-LABEL: zext_sraw_aext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sraw a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_sraw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: zext_sraw_aext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sraw a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_sraw_sext_aext(i32 signext %a, i32 %b) nounwind {
+; RV64I-LABEL: zext_sraw_sext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_sraw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: zext_sraw_sext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_sraw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: zext_sraw_sext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_sraw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
+; RV64I-LABEL: zext_sraw_zext_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sraw a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_sraw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
+; RV64I-LABEL: zext_sraw_zext_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sraw a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, %b
+  ret i32 %1
+}
+
+define zeroext i32 @zext_sraw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
+; RV64I-LABEL: zext_sraw_zext_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sraw a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, %b
+  ret i32 %1
+}
+
+; addiw should be selected when there is a signext result.
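+;
+; A sketch of the kind of pattern that presumably matches this case (again
+; illustrative, not necessarily the exact .td rule):
+;   def : Pat<(sext_inreg (add GPR:$rs1, simm12:$imm12), i32),
+;             (ADDIW GPR:$rs1, simm12:$imm12)>;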
+
+define i32 @aext_addiw_aext(i32 %a) nounwind {
+; RV64I-LABEL: aext_addiw_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a0, a0, 1
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, 1
+  ret i32 %1
+}
+
+define i32 @aext_addiw_sext(i32 signext %a) nounwind {
+; RV64I-LABEL: aext_addiw_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a0, a0, 2
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, 2
+  ret i32 %1
+}
+
+define i32 @aext_addiw_zext(i32 zeroext %a) nounwind {
+; RV64I-LABEL: aext_addiw_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a0, a0, 3
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, 3
+  ret i32 %1
+}
+
+define signext i32 @sext_addiw_aext(i32 %a) nounwind {
+; RV64I-LABEL: sext_addiw_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addiw a0, a0, 4
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, 4
+  ret i32 %1
+}
+
+define signext i32 @sext_addiw_sext(i32 signext %a) nounwind {
+; RV64I-LABEL: sext_addiw_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addiw a0, a0, 5
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, 5
+  ret i32 %1
+}
+
+define signext i32 @sext_addiw_zext(i32 zeroext %a) nounwind {
+; RV64I-LABEL: sext_addiw_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addiw a0, a0, 6
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, 6
+  ret i32 %1
+}
+
+define zeroext i32 @zext_addiw_aext(i32 %a) nounwind {
+; RV64I-LABEL: zext_addiw_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a0, a0, 7
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, 7
+  ret i32 %1
+}
+
+define zeroext i32 @zext_addiw_sext(i32 signext %a) nounwind {
+; RV64I-LABEL: zext_addiw_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a0, a0, 8
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, 8
+  ret i32 %1
+}
+
+define zeroext i32 @zext_addiw_zext(i32 zeroext %a) nounwind {
+; RV64I-LABEL: zext_addiw_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a0, a0, 9
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = add i32 %a, 9
+  ret i32 %1
+}
+
+; slliw should be selected whenever the result is signext.
+
+define i32 @aext_slliw_aext(i32 %a) nounwind {
+; RV64I-LABEL: aext_slliw_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, 1
+  ret i32 %1
+}
+
+define i32 @aext_slliw_sext(i32 signext %a) nounwind {
+; RV64I-LABEL: aext_slliw_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 2
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, 2
+  ret i32 %1
+}
+
+define i32 @aext_slliw_zext(i32 zeroext %a) nounwind {
+; RV64I-LABEL: aext_slliw_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 3
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, 3
+  ret i32 %1
+}
+
+define signext i32 @sext_slliw_aext(i32 %a) nounwind {
+; RV64I-LABEL: sext_slliw_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slliw a0, a0, 4
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, 4
+  ret i32 %1
+}
+
+define signext i32 @sext_slliw_sext(i32 signext %a) nounwind {
+; RV64I-LABEL: sext_slliw_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slliw a0, a0, 5
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, 5
+  ret i32 %1
+}
+
+define signext i32 @sext_slliw_zext(i32 zeroext %a) nounwind {
+; RV64I-LABEL: sext_slliw_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slliw a0, a0, 6
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, 6
+  ret i32 %1
+}
+
+; TODO: the constant shifts could be combined.
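+;
+; For example, in zext_slliw_aext below the three shifts
+;   slli a0, a0, 7 ; slli a0, a0, 32 ; srli a0, a0, 32
+; could fold to slli a0, a0, 39 followed by srli a0, a0, 32, which computes
+; the same zero-extended 32-bit shift result in two instructions.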
+
+define zeroext i32 @zext_slliw_aext(i32 %a) nounwind {
+; RV64I-LABEL: zext_slliw_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 7
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, 7
+  ret i32 %1
+}
+
+define zeroext i32 @zext_slliw_sext(i32 signext %a) nounwind {
+; RV64I-LABEL: zext_slliw_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 8
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, 8
+  ret i32 %1
+}
+
+define zeroext i32 @zext_slliw_zext(i32 zeroext %a) nounwind {
+; RV64I-LABEL: zext_slliw_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 9
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = shl i32 %a, 9
+  ret i32 %1
+}
+
+; srliw should be selected unless the first operand is zeroext, in which case
+; srli is equivalent.
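+;
+; Once the upper 32 bits are known zero, a 64-bit srli shifts in nothing but
+; zeros, so it yields the same value as srliw; moreover, shifting right by a
+; nonzero amount clears bit 31, so the result is correctly extended either
+; way.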
+
+define i32 @aext_srliw_aext(i32 %a) nounwind {
+; RV64I-LABEL: aext_srliw_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srliw a0, a0, 1
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, 1
+  ret i32 %1
+}
+
+define i32 @aext_srliw_sext(i32 signext %a) nounwind {
+; RV64I-LABEL: aext_srliw_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srliw a0, a0, 2
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, 2
+  ret i32 %1
+}
+
+define i32 @aext_srliw_zext(i32 zeroext %a) nounwind {
+; RV64I-LABEL: aext_srliw_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a0, a0, 3
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, 3
+  ret i32 %1
+}
+
+define signext i32 @sext_srliw_aext(i32 %a) nounwind {
+; RV64I-LABEL: sext_srliw_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srliw a0, a0, 4
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, 4
+  ret i32 %1
+}
+
+define signext i32 @sext_srliw_sext(i32 signext %a) nounwind {
+; RV64I-LABEL: sext_srliw_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srliw a0, a0, 5
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, 5
+  ret i32 %1
+}
+
+define signext i32 @sext_srliw_zext(i32 zeroext %a) nounwind {
+; RV64I-LABEL: sext_srliw_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a0, a0, 6
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, 6
+  ret i32 %1
+}
+
+define zeroext i32 @zext_srliw_aext(i32 %a) nounwind {
+; RV64I-LABEL: zext_srliw_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srliw a0, a0, 7
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, 7
+  ret i32 %1
+}
+
+define zeroext i32 @zext_srliw_sext(i32 signext %a) nounwind {
+; RV64I-LABEL: zext_srliw_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srliw a0, a0, 8
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, 8
+  ret i32 %1
+}
+
+define zeroext i32 @zext_srliw_zext(i32 zeroext %a) nounwind {
+; RV64I-LABEL: zext_srliw_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a0, a0, 9
+; RV64I-NEXT:    ret
+  %1 = lshr i32 %a, 9
+  ret i32 %1
+}
+
+; srai is equivalent to sraiw if the first operand is sign-extended.
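+;
+; As with sraw above, a sign-extended input has bits 63:31 equal to the sign
+; bit, so srai by an amount below 32 gives the same (sign-extended) result as
+; sraiw.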
+
+define i32 @aext_sraiw_aext(i32 %a) nounwind {
+; RV64I-LABEL: aext_sraiw_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sraiw a0, a0, 1
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, 1
+  ret i32 %1
+}
+
+define i32 @aext_sraiw_sext(i32 signext %a) nounwind {
+; RV64I-LABEL: aext_sraiw_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srai a0, a0, 2
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, 2
+  ret i32 %1
+}
+
+define i32 @aext_sraiw_zext(i32 zeroext %a) nounwind {
+; RV64I-LABEL: aext_sraiw_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sraiw a0, a0, 3
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, 3
+  ret i32 %1
+}
+
+define signext i32 @sext_sraiw_aext(i32 %a) nounwind {
+; RV64I-LABEL: sext_sraiw_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sraiw a0, a0, 4
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, 4
+  ret i32 %1
+}
+
+define signext i32 @sext_sraiw_sext(i32 signext %a) nounwind {
+; RV64I-LABEL: sext_sraiw_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srai a0, a0, 5
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, 5
+  ret i32 %1
+}
+
+define signext i32 @sext_sraiw_zext(i32 zeroext %a) nounwind {
+; RV64I-LABEL: sext_sraiw_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sraiw a0, a0, 6
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, 6
+  ret i32 %1
+}
+
+; TODO: sraiw could be selected rather than sext.w and srli. Alternatively,
+; the srli could be merged into the shifts used for zero-extension.
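+;
+; A possible improved sequence (a sketch) for zext_sraiw_aext would be:
+;   sraiw a0, a0, 7   ; 32-bit ashr on the low word, result sign-extended
+;   slli a0, a0, 32   ; zero-extend the low word
+;   srli a0, a0, 32
+; saving one instruction over the sext.w-based sequence.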
+
+define zeroext i32 @zext_sraiw_aext(i32 %a) nounwind {
+; RV64I-LABEL: zext_sraiw_aext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    srli a0, a0, 7
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, 7
+  ret i32 %1
+}
+
+define zeroext i32 @zext_sraiw_sext(i32 signext %a) nounwind {
+; RV64I-LABEL: zext_sraiw_sext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a0, a0, 8
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, 8
+  ret i32 %1
+}
+
+; TODO: sraiw could be selected rather than sext.w and srli. Alternatively,
+; the srli could be merged into the shifts used for zero-extension.
+
+define zeroext i32 @zext_sraiw_zext(i32 zeroext %a) nounwind {
+; RV64I-LABEL: zext_sraiw_zext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    srli a0, a0, 9
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
+  %1 = ashr i32 %a, 9
+  ret i32 %1
+}

Modified: llvm/trunk/test/CodeGen/RISCV/sext-zext-trunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/sext-zext-trunc.ll?rev=347973&r1=347972&r2=347973&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/sext-zext-trunc.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/sext-zext-trunc.ll Fri Nov 30 01:38:44 2018
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64I
 
 define i8 @sext_i1_to_i8(i1 %a) {
 ; RV32I-LABEL: sext_i1_to_i8:
@@ -8,6 +10,12 @@ define i8 @sext_i1_to_i8(i1 %a) {
 ; RV32I-NEXT:    andi a0, a0, 1
 ; RV32I-NEXT:    neg a0, a0
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sext_i1_to_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    ret
   %1 = sext i1 %a to i8
   ret i8 %1
 }
@@ -18,6 +26,12 @@ define i16 @sext_i1_to_i16(i1 %a) {
 ; RV32I-NEXT:    andi a0, a0, 1
 ; RV32I-NEXT:    neg a0, a0
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sext_i1_to_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    ret
   %1 = sext i1 %a to i16
   ret i16 %1
 }
@@ -28,6 +42,12 @@ define i32 @sext_i1_to_i32(i1 %a) {
 ; RV32I-NEXT:    andi a0, a0, 1
 ; RV32I-NEXT:    neg a0, a0
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sext_i1_to_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    ret
   %1 = sext i1 %a to i32
   ret i32 %1
 }
@@ -39,6 +59,12 @@ define i64 @sext_i1_to_i64(i1 %a) {
 ; RV32I-NEXT:    neg a0, a0
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sext_i1_to_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    ret
   %1 = sext i1 %a to i64
   ret i64 %1
 }
@@ -49,6 +75,12 @@ define i16 @sext_i8_to_i16(i8 %a) {
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sext_i8_to_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    ret
   %1 = sext i8 %a to i16
   ret i16 %1
 }
@@ -59,6 +91,12 @@ define i32 @sext_i8_to_i32(i8 %a) {
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sext_i8_to_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    ret
   %1 = sext i8 %a to i32
   ret i32 %1
 }
@@ -70,6 +108,12 @@ define i64 @sext_i8_to_i64(i8 %a) {
 ; RV32I-NEXT:    srai a0, a1, 24
 ; RV32I-NEXT:    srai a1, a1, 31
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sext_i8_to_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    ret
   %1 = sext i8 %a to i64
   ret i64 %1
 }
@@ -80,6 +124,12 @@ define i32 @sext_i16_to_i32(i16 %a) {
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srai a0, a0, 16
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sext_i16_to_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    ret
   %1 = sext i16 %a to i32
   ret i32 %1
 }
@@ -91,6 +141,12 @@ define i64 @sext_i16_to_i64(i16 %a) {
 ; RV32I-NEXT:    srai a0, a1, 16
 ; RV32I-NEXT:    srai a1, a1, 31
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sext_i16_to_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    ret
   %1 = sext i16 %a to i64
   ret i64 %1
 }
@@ -100,6 +156,11 @@ define i64 @sext_i32_to_i64(i32 %a) {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    srai a1, a0, 31
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sext_i32_to_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    ret
   %1 = sext i32 %a to i64
   ret i64 %1
 }
@@ -109,6 +170,11 @@ define i8 @zext_i1_to_i8(i1 %a) {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: zext_i1_to_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    ret
   %1 = zext i1 %a to i8
   ret i8 %1
 }
@@ -118,6 +184,11 @@ define i16 @zext_i1_to_i16(i1 %a) {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: zext_i1_to_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    ret
   %1 = zext i1 %a to i16
   ret i16 %1
 }
@@ -127,6 +198,11 @@ define i32 @zext_i1_to_i32(i1 %a) {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: zext_i1_to_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    ret
   %1 = zext i1 %a to i32
   ret i32 %1
 }
@@ -137,6 +213,11 @@ define i64 @zext_i1_to_i64(i1 %a) {
 ; RV32I-NEXT:    andi a0, a0, 1
 ; RV32I-NEXT:    mv a1, zero
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: zext_i1_to_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    ret
   %1 = zext i1 %a to i64
   ret i64 %1
 }
@@ -146,6 +227,11 @@ define i16 @zext_i8_to_i16(i8 %a) {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 255
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: zext_i8_to_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    ret
   %1 = zext i8 %a to i16
   ret i16 %1
 }
@@ -155,6 +241,11 @@ define i32 @zext_i8_to_i32(i8 %a) {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 255
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: zext_i8_to_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    ret
   %1 = zext i8 %a to i32
   ret i32 %1
 }
@@ -165,6 +256,11 @@ define i64 @zext_i8_to_i64(i8 %a) {
 ; RV32I-NEXT:    andi a0, a0, 255
 ; RV32I-NEXT:    mv a1, zero
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: zext_i8_to_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    ret
   %1 = zext i8 %a to i64
   ret i64 %1
 }
@@ -176,6 +272,13 @@ define i32 @zext_i16_to_i32(i16 %a) {
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: zext_i16_to_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = zext i16 %a to i32
   ret i32 %1
 }
@@ -188,6 +291,13 @@ define i64 @zext_i16_to_i64(i16 %a) {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    mv a1, zero
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: zext_i16_to_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
   %1 = zext i16 %a to i64
   ret i64 %1
 }
@@ -197,17 +307,24 @@ define i64 @zext_i32_to_i64(i32 %a) {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    mv a1, zero
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: zext_i32_to_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    ret
   %1 = zext i32 %a to i64
   ret i64 %1
 }
 
-; TODO: should the trunc tests explicitly ensure no code is generated before
-; jalr?
-
 define i1 @trunc_i8_to_i1(i8 %a) {
 ; RV32I-LABEL: trunc_i8_to_i1:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: trunc_i8_to_i1:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    ret
   %1 = trunc i8 %a to i1
   ret i1 %1
 }
@@ -216,6 +333,10 @@ define i1 @trunc_i16_to_i1(i16 %a) {
 ; RV32I-LABEL: trunc_i16_to_i1:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: trunc_i16_to_i1:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    ret
   %1 = trunc i16 %a to i1
   ret i1 %1
 }
@@ -224,6 +345,10 @@ define i1 @trunc_i32_to_i1(i32 %a) {
 ; RV32I-LABEL: trunc_i32_to_i1:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: trunc_i32_to_i1:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    ret
   %1 = trunc i32 %a to i1
   ret i1 %1
 }
@@ -232,6 +357,10 @@ define i1 @trunc_i64_to_i1(i64 %a) {
 ; RV32I-LABEL: trunc_i64_to_i1:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: trunc_i64_to_i1:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    ret
   %1 = trunc i64 %a to i1
   ret i1 %1
 }
@@ -240,6 +369,10 @@ define i8 @trunc_i16_to_i8(i16 %a) {
 ; RV32I-LABEL: trunc_i16_to_i8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: trunc_i16_to_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    ret
   %1 = trunc i16 %a to i8
   ret i8 %1
 }
@@ -248,6 +381,10 @@ define i8 @trunc_i32_to_i8(i32 %a) {
 ; RV32I-LABEL: trunc_i32_to_i8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: trunc_i32_to_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    ret
   %1 = trunc i32 %a to i8
   ret i8 %1
 }
@@ -256,6 +393,10 @@ define i8 @trunc_i64_to_i8(i64 %a) {
 ; RV32I-LABEL: trunc_i64_to_i8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: trunc_i64_to_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    ret
   %1 = trunc i64 %a to i8
   ret i8 %1
 }
@@ -264,6 +405,10 @@ define i16 @trunc_i32_to_i16(i32 %a) {
 ; RV32I-LABEL: trunc_i32_to_i16:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: trunc_i32_to_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    ret
   %1 = trunc i32 %a to i16
   ret i16 %1
 }
@@ -272,6 +417,10 @@ define i16 @trunc_i64_to_i16(i64 %a) {
 ; RV32I-LABEL: trunc_i64_to_i16:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: trunc_i64_to_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    ret
   %1 = trunc i64 %a to i16
   ret i16 %1
 }
@@ -280,6 +429,10 @@ define i32 @trunc_i64_to_i32(i64 %a) {
 ; RV32I-LABEL: trunc_i64_to_i32:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: trunc_i64_to_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    ret
   %1 = trunc i64 %a to i32
   ret i32 %1
 }



