[llvm] r177747 - Hexagon: Add and enable memops setbit, clrbit, &, |, +, - for byte, short, and word.

Jyotsna Verma jverma at codeaurora.org
Fri Mar 22 11:41:34 PDT 2013


Author: jverma
Date: Fri Mar 22 13:41:34 2013
New Revision: 177747

URL: http://llvm.org/viewvc/llvm-project?rev=177747&view=rev
Log:
Hexagon: Add and enable memops setbit, clrbit, &, |, +, - for byte, short, and word.


Added:
    llvm/trunk/test/CodeGen/Hexagon/memops.ll
    llvm/trunk/test/CodeGen/Hexagon/memops1.ll
    llvm/trunk/test/CodeGen/Hexagon/memops2.ll
    llvm/trunk/test/CodeGen/Hexagon/memops3.ll
Modified:
    llvm/trunk/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
    llvm/trunk/lib/Target/Hexagon/HexagonInstrInfo.cpp
    llvm/trunk/lib/Target/Hexagon/HexagonInstrInfoV4.td
    llvm/trunk/lib/Target/Hexagon/HexagonRegisterInfo.cpp
    llvm/trunk/lib/Target/Hexagon/HexagonSubtarget.cpp

Modified: llvm/trunk/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp?rev=177747&r1=177746&r2=177747&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp Fri Mar 22 13:41:34 2013
@@ -113,6 +113,46 @@ public:
   SDNode *SelectAdd(SDNode *N);
   bool isConstExtProfitable(SDNode *N) const;
 
+// XformMskToBitPosU5Imm - Returns the bit position which
+// the single bit 32 bit mask represents.
+// Used in Clr and Set bit immediate memops.
+SDValue XformMskToBitPosU5Imm(uint32_t Imm) {
+  int32_t bitPos;
+  bitPos = Log2_32(Imm);
+  assert(bitPos >= 0 && bitPos < 32 &&
+         "Constant out of range for 32 BitPos Memops");
+  return CurDAG->getTargetConstant(bitPos, MVT::i32);
+}
+
+// XformMskToBitPosU4Imm - Returns the bit position which the single bit 16 bit
+// mask represents. Used in Clr and Set bit immediate memops.
+SDValue XformMskToBitPosU4Imm(uint16_t Imm) {
+  return XformMskToBitPosU5Imm(Imm);
+}
+
+// XformMskToBitPosU3Imm - Returns the bit position which the single bit 8 bit
+// mask represents. Used in Clr and Set bit immediate memops.
+SDValue XformMskToBitPosU3Imm(uint8_t Imm) {
+  return XformMskToBitPosU5Imm(Imm);
+}
+
+// Return true if there is exactly one bit set in V, i.e., if V is one of the
+// following integers: 2^0, 2^1, ..., 2^31.
+bool ImmIsSingleBit(uint32_t v) const {
+  uint32_t c = CountPopulation_64(v);
+  // Only return true if we counted 1 bit.
+  return c == 1;
+}
+
+// XformM5ToU5Imm - Return a target constant with the specified value, of type
+// i32 where the negative literal is transformed into a positive literal for
+// use in -= memops.
+inline SDValue XformM5ToU5Imm(signed Imm) {
+   assert( (Imm >= -31 && Imm <= -1)  && "Constant out of range for Memops");
+   return CurDAG->getTargetConstant( - Imm, MVT::i32);
+}
+
+
 // XformU7ToU7M1Imm - Return a target constant decremented by 1, in range
 // [1..128], used in cmpb.gtu instructions.
 inline SDValue XformU7ToU7M1Imm(signed Imm) {

Modified: llvm/trunk/lib/Target/Hexagon/HexagonInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonInstrInfo.cpp?rev=177747&r1=177746&r2=177747&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonInstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonInstrInfo.cpp Fri Mar 22 13:41:34 2013
@@ -1991,46 +1991,28 @@ isValidOffset(const int Opcode, const in
     return (Offset >= Hexagon_ADDI_OFFSET_MIN) &&
       (Offset <= Hexagon_ADDI_OFFSET_MAX);
 
-  case Hexagon::MEMw_ADDi_indexed_MEM_V4 :
-  case Hexagon::MEMw_SUBi_indexed_MEM_V4 :
-  case Hexagon::MEMw_ADDr_indexed_MEM_V4 :
-  case Hexagon::MEMw_SUBr_indexed_MEM_V4 :
-  case Hexagon::MEMw_ANDr_indexed_MEM_V4 :
-  case Hexagon::MEMw_ORr_indexed_MEM_V4 :
-  case Hexagon::MEMw_ADDi_MEM_V4 :
-  case Hexagon::MEMw_SUBi_MEM_V4 :
-  case Hexagon::MEMw_ADDr_MEM_V4 :
-  case Hexagon::MEMw_SUBr_MEM_V4 :
-  case Hexagon::MEMw_ANDr_MEM_V4 :
-  case Hexagon::MEMw_ORr_MEM_V4 :
+  case Hexagon::MemOPw_ADDi_V4 :
+  case Hexagon::MemOPw_SUBi_V4 :
+  case Hexagon::MemOPw_ADDr_V4 :
+  case Hexagon::MemOPw_SUBr_V4 :
+  case Hexagon::MemOPw_ANDr_V4 :
+  case Hexagon::MemOPw_ORr_V4 :
     return (0 <= Offset && Offset <= 255);
 
-  case Hexagon::MEMh_ADDi_indexed_MEM_V4 :
-  case Hexagon::MEMh_SUBi_indexed_MEM_V4 :
-  case Hexagon::MEMh_ADDr_indexed_MEM_V4 :
-  case Hexagon::MEMh_SUBr_indexed_MEM_V4 :
-  case Hexagon::MEMh_ANDr_indexed_MEM_V4 :
-  case Hexagon::MEMh_ORr_indexed_MEM_V4 :
-  case Hexagon::MEMh_ADDi_MEM_V4 :
-  case Hexagon::MEMh_SUBi_MEM_V4 :
-  case Hexagon::MEMh_ADDr_MEM_V4 :
-  case Hexagon::MEMh_SUBr_MEM_V4 :
-  case Hexagon::MEMh_ANDr_MEM_V4 :
-  case Hexagon::MEMh_ORr_MEM_V4 :
+  case Hexagon::MemOPh_ADDi_V4 :
+  case Hexagon::MemOPh_SUBi_V4 :
+  case Hexagon::MemOPh_ADDr_V4 :
+  case Hexagon::MemOPh_SUBr_V4 :
+  case Hexagon::MemOPh_ANDr_V4 :
+  case Hexagon::MemOPh_ORr_V4 :
     return (0 <= Offset && Offset <= 127);
 
-  case Hexagon::MEMb_ADDi_indexed_MEM_V4 :
-  case Hexagon::MEMb_SUBi_indexed_MEM_V4 :
-  case Hexagon::MEMb_ADDr_indexed_MEM_V4 :
-  case Hexagon::MEMb_SUBr_indexed_MEM_V4 :
-  case Hexagon::MEMb_ANDr_indexed_MEM_V4 :
-  case Hexagon::MEMb_ORr_indexed_MEM_V4 :
-  case Hexagon::MEMb_ADDi_MEM_V4 :
-  case Hexagon::MEMb_SUBi_MEM_V4 :
-  case Hexagon::MEMb_ADDr_MEM_V4 :
-  case Hexagon::MEMb_SUBr_MEM_V4 :
-  case Hexagon::MEMb_ANDr_MEM_V4 :
-  case Hexagon::MEMb_ORr_MEM_V4 :
+  case Hexagon::MemOPb_ADDi_V4 :
+  case Hexagon::MemOPb_SUBi_V4 :
+  case Hexagon::MemOPb_ADDr_V4 :
+  case Hexagon::MemOPb_SUBr_V4 :
+  case Hexagon::MemOPb_ANDr_V4 :
+  case Hexagon::MemOPb_ORr_V4 :
     return (0 <= Offset && Offset <= 63);
 
   // LDri_pred and STriw_pred are pseudo operations, so it has to take offset of
@@ -2086,44 +2068,33 @@ isMemOp(const MachineInstr *MI) const {
   switch (MI->getOpcode())
   {
     default: return false;
-    case Hexagon::MEMw_ADDi_indexed_MEM_V4 :
-    case Hexagon::MEMw_SUBi_indexed_MEM_V4 :
-    case Hexagon::MEMw_ADDr_indexed_MEM_V4 :
-    case Hexagon::MEMw_SUBr_indexed_MEM_V4 :
-    case Hexagon::MEMw_ANDr_indexed_MEM_V4 :
-    case Hexagon::MEMw_ORr_indexed_MEM_V4 :
-    case Hexagon::MEMw_ADDi_MEM_V4 :
-    case Hexagon::MEMw_SUBi_MEM_V4 :
-    case Hexagon::MEMw_ADDr_MEM_V4 :
-    case Hexagon::MEMw_SUBr_MEM_V4 :
-    case Hexagon::MEMw_ANDr_MEM_V4 :
-    case Hexagon::MEMw_ORr_MEM_V4 :
-    case Hexagon::MEMh_ADDi_indexed_MEM_V4 :
-    case Hexagon::MEMh_SUBi_indexed_MEM_V4 :
-    case Hexagon::MEMh_ADDr_indexed_MEM_V4 :
-    case Hexagon::MEMh_SUBr_indexed_MEM_V4 :
-    case Hexagon::MEMh_ANDr_indexed_MEM_V4 :
-    case Hexagon::MEMh_ORr_indexed_MEM_V4 :
-    case Hexagon::MEMh_ADDi_MEM_V4 :
-    case Hexagon::MEMh_SUBi_MEM_V4 :
-    case Hexagon::MEMh_ADDr_MEM_V4 :
-    case Hexagon::MEMh_SUBr_MEM_V4 :
-    case Hexagon::MEMh_ANDr_MEM_V4 :
-    case Hexagon::MEMh_ORr_MEM_V4 :
-    case Hexagon::MEMb_ADDi_indexed_MEM_V4 :
-    case Hexagon::MEMb_SUBi_indexed_MEM_V4 :
-    case Hexagon::MEMb_ADDr_indexed_MEM_V4 :
-    case Hexagon::MEMb_SUBr_indexed_MEM_V4 :
-    case Hexagon::MEMb_ANDr_indexed_MEM_V4 :
-    case Hexagon::MEMb_ORr_indexed_MEM_V4 :
-    case Hexagon::MEMb_ADDi_MEM_V4 :
-    case Hexagon::MEMb_SUBi_MEM_V4 :
-    case Hexagon::MEMb_ADDr_MEM_V4 :
-    case Hexagon::MEMb_SUBr_MEM_V4 :
-    case Hexagon::MEMb_ANDr_MEM_V4 :
-    case Hexagon::MEMb_ORr_MEM_V4 :
-      return true;
+    case Hexagon::MemOPw_ADDi_V4 :
+    case Hexagon::MemOPw_SUBi_V4 :
+    case Hexagon::MemOPw_ADDr_V4 :
+    case Hexagon::MemOPw_SUBr_V4 :
+    case Hexagon::MemOPw_ANDr_V4 :
+    case Hexagon::MemOPw_ORr_V4 :
+    case Hexagon::MemOPh_ADDi_V4 :
+    case Hexagon::MemOPh_SUBi_V4 :
+    case Hexagon::MemOPh_ADDr_V4 :
+    case Hexagon::MemOPh_SUBr_V4 :
+    case Hexagon::MemOPh_ANDr_V4 :
+    case Hexagon::MemOPh_ORr_V4 :
+    case Hexagon::MemOPb_ADDi_V4 :
+    case Hexagon::MemOPb_SUBi_V4 :
+    case Hexagon::MemOPb_ADDr_V4 :
+    case Hexagon::MemOPb_SUBr_V4 :
+    case Hexagon::MemOPb_ANDr_V4 :
+    case Hexagon::MemOPb_ORr_V4 :
+    case Hexagon::MemOPb_SETBITi_V4:
+    case Hexagon::MemOPh_SETBITi_V4:
+    case Hexagon::MemOPw_SETBITi_V4:
+    case Hexagon::MemOPb_CLRBITi_V4:
+    case Hexagon::MemOPh_CLRBITi_V4:
+    case Hexagon::MemOPw_CLRBITi_V4:
+    return true;
   }
+  return false;
 }
 
 

Modified: llvm/trunk/lib/Target/Hexagon/HexagonInstrInfoV4.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonInstrInfoV4.td?rev=177747&r1=177746&r2=177747&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonInstrInfoV4.td (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonInstrInfoV4.td Fri Mar 22 13:41:34 2013
@@ -2658,414 +2658,367 @@ def LSRd_rr_xor_V4 : MInst_acc<(outs Dou
 // MEMOP: Word, Half, Byte
 //===----------------------------------------------------------------------===//
 
+def MEMOPIMM : SDNodeXForm<imm, [{
+  // Call the transformation function XformM5ToU5Imm to get the negative
+  // immediate's positive counterpart.
+  int32_t imm = N->getSExtValue();
+  return XformM5ToU5Imm(imm);
+}]>;
+
+def MEMOPIMM_HALF : SDNodeXForm<imm, [{
+  // -1 .. -31 represented as 65535..65515
+  // assigning to a short restores our desired signed value.
+  // Call the transformation function XformM5ToU5Imm to get the negative
+  // immediate's positive counterpart.
+  int16_t imm = N->getSExtValue();
+  return XformM5ToU5Imm(imm);
+}]>;
+
+def MEMOPIMM_BYTE : SDNodeXForm<imm, [{
+  // -1 .. -31 represented as 255..235
+  // assigning to a char restores our desired signed value.
+  // Call the transformation function XformM5ToU5Imm to get the negative
+  // immediate's positive counterpart.
+  int8_t imm = N->getSExtValue();
+  return XformM5ToU5Imm(imm);
+}]>;
+
+def SETMEMIMM : SDNodeXForm<imm, [{
+   // Return the bit position we will set [0-31].
+   // As an SDNode.
+   int32_t imm = N->getSExtValue();
+   return XformMskToBitPosU5Imm(imm);
+}]>;
+
+def CLRMEMIMM : SDNodeXForm<imm, [{
+   // Return the bit position we will clear [0-31].
+   // As an SDNode.
+   // we bit negate the value first
+   int32_t imm = ~(N->getSExtValue());
+   return XformMskToBitPosU5Imm(imm);
+}]>;
+
+def SETMEMIMM_SHORT : SDNodeXForm<imm, [{
+   // Return the bit position we will set [0-15].
+   // As an SDNode.
+   int16_t imm = N->getSExtValue();
+   return XformMskToBitPosU4Imm(imm);
+}]>;
+
+def CLRMEMIMM_SHORT : SDNodeXForm<imm, [{
+   // Return the bit position we will clear [0-15].
+   // As an SDNode.
+   // we bit negate the value first
+   int16_t imm = ~(N->getSExtValue());
+   return XformMskToBitPosU4Imm(imm);
+}]>;
+
+def SETMEMIMM_BYTE : SDNodeXForm<imm, [{
+   // Return the bit position we will set [0-7].
+   // As an SDNode.
+   int8_t imm =  N->getSExtValue();
+   return XformMskToBitPosU3Imm(imm);
+}]>;
+
+def CLRMEMIMM_BYTE : SDNodeXForm<imm, [{
+   // Return the bit position we will clear [0-7].
+   // As an SDNode.
+   // we bit negate the value first
+   int8_t imm = ~(N->getSExtValue());
+   return XformMskToBitPosU3Imm(imm);
+}]>;
+
+//===----------------------------------------------------------------------===//
+// Template class for MemOp instructions with the register value.
+//===----------------------------------------------------------------------===//
+class MemOp_rr_base <string opc, bits<2> opcBits, Operand ImmOp,
+                     string memOp, bits<2> memOpBits> :
+      MEMInst_V4<(outs),
+                 (ins IntRegs:$base, ImmOp:$offset, IntRegs:$delta),
+                 opc#"($base+#$offset)"#memOp#"$delta",
+                 []>,
+                 Requires<[HasV4T, UseMEMOP]> {
+
+    bits<5> base;
+    bits<5> delta;
+    bits<32> offset;
+    bits<6> offsetBits; // memb - u6:0 , memh - u6:1, memw - u6:2
+
+    let offsetBits = !if (!eq(opcBits, 0b00), offset{5-0},
+                     !if (!eq(opcBits, 0b01), offset{6-1},
+                     !if (!eq(opcBits, 0b10), offset{7-2},0)));
+
+    let IClass = 0b0011;
+    let Inst{27-24} = 0b1110;
+    let Inst{22-21} = opcBits;
+    let Inst{20-16} = base;
+    let Inst{13} = 0b0;
+    let Inst{12-7} = offsetBits;
+    let Inst{6-5} = memOpBits;
+    let Inst{4-0} = delta;
+}
+
+//===----------------------------------------------------------------------===//
+// Template class for MemOp instructions with the immediate value.
+//===----------------------------------------------------------------------===//
+class MemOp_ri_base <string opc, bits<2> opcBits, Operand ImmOp,
+                     string memOp, bits<2> memOpBits> :
+      MEMInst_V4 <(outs),
+                  (ins IntRegs:$base, ImmOp:$offset, u5Imm:$delta),
+                  opc#"($base+#$offset)"#memOp#"#$delta"
+                  #!if(memOpBits{1},")", ""), // clrbit, setbit - include ')'
+                  []>,
+                  Requires<[HasV4T, UseMEMOP]> {
+
+    bits<5> base;
+    bits<5> delta;
+    bits<32> offset;
+    bits<6> offsetBits; // memb - u6:0 , memh - u6:1, memw - u6:2
+
+    let offsetBits = !if (!eq(opcBits, 0b00), offset{5-0},
+                     !if (!eq(opcBits, 0b01), offset{6-1},
+                     !if (!eq(opcBits, 0b10), offset{7-2},0)));
+
+    let IClass = 0b0011;
+    let Inst{27-24} = 0b1111;
+    let Inst{22-21} = opcBits;
+    let Inst{20-16} = base;
+    let Inst{13} = 0b0;
+    let Inst{12-7} = offsetBits;
+    let Inst{6-5} = memOpBits;
+    let Inst{4-0} = delta;
+}
+
+// multiclass to define MemOp instructions with register operand.
+multiclass MemOp_rr<string opc, bits<2> opcBits, Operand ImmOp> {
+  def _ADD#NAME#_V4 : MemOp_rr_base <opc, opcBits, ImmOp, " += ", 0b00>; // add
+  def _SUB#NAME#_V4 : MemOp_rr_base <opc, opcBits, ImmOp, " -= ", 0b01>; // sub
+  def _AND#NAME#_V4 : MemOp_rr_base <opc, opcBits, ImmOp, " &= ", 0b10>; // and
+  def _OR#NAME#_V4  : MemOp_rr_base <opc, opcBits, ImmOp, " |= ", 0b11>; // or
+}
+
+// multiclass to define MemOp instructions with immediate Operand.
+multiclass MemOp_ri<string opc, bits<2> opcBits, Operand ImmOp> {
+  def _ADD#NAME#_V4 : MemOp_ri_base <opc, opcBits, ImmOp, " += ", 0b00 >;
+  def _SUB#NAME#_V4 : MemOp_ri_base <opc, opcBits, ImmOp, " -= ", 0b01 >;
+  def _CLRBIT#NAME#_V4 : MemOp_ri_base<opc, opcBits, ImmOp, " =clrbit(", 0b10>;
+  def _SETBIT#NAME#_V4 : MemOp_ri_base<opc, opcBits, ImmOp, " =setbit(", 0b11>;
+}
+
+multiclass MemOp_base <string opc, bits<2> opcBits, Operand ImmOp> {
+  defm r : MemOp_rr <opc, opcBits, ImmOp>;
+  defm i : MemOp_ri <opc, opcBits, ImmOp>;
+}
+
+// Define MemOp instructions.
+let isExtendable = 1, opExtendable = 1, isExtentSigned = 0,
+validSubTargets =HasV4SubT in {
+  let opExtentBits = 6, accessSize = ByteAccess in
+  defm MemOPb : MemOp_base <"memb", 0b00, u6_0Ext>;
+
+  let opExtentBits = 7, accessSize = HalfWordAccess in
+  defm MemOPh : MemOp_base <"memh", 0b01, u6_1Ext>;
+
+  let opExtentBits = 8, accessSize = WordAccess in
+  defm MemOPw : MemOp_base <"memw", 0b10, u6_2Ext>;
+}
+
+//===----------------------------------------------------------------------===//
+// Multiclass to define 'Def Pats' for ALU operations on the memory
+// Here value used for the ALU operation is an immediate value.
+// mem[bh](Rs+#0) += #U5
+// mem[bh](Rs+#u6) += #U5
+//===----------------------------------------------------------------------===//
+
+multiclass MemOpi_u5Pats <PatFrag ldOp, PatFrag stOp, PatLeaf ExtPred,
+                          InstHexagon MI, SDNode OpNode> {
+  let AddedComplexity = 180 in
+  def : Pat < (stOp (OpNode (ldOp IntRegs:$addr), u5ImmPred:$addend),
+                    IntRegs:$addr),
+              (MI IntRegs:$addr, #0, u5ImmPred:$addend )>;
+
+  let AddedComplexity = 190 in
+  def : Pat <(stOp (OpNode (ldOp (add IntRegs:$base, ExtPred:$offset)),
+                     u5ImmPred:$addend),
+             (add IntRegs:$base, ExtPred:$offset)),
+       (MI IntRegs:$base, ExtPred:$offset, u5ImmPred:$addend)>;
+}
+
+multiclass MemOpi_u5ALUOp<PatFrag ldOp, PatFrag stOp, PatLeaf ExtPred,
+                          InstHexagon addMI, InstHexagon subMI> {
+  defm : MemOpi_u5Pats<ldOp, stOp, ExtPred, addMI, add>;
+  defm : MemOpi_u5Pats<ldOp, stOp, ExtPred, subMI, sub>;
+}
+
+multiclass MemOpi_u5ExtType<PatFrag ldOpByte, PatFrag ldOpHalf > {
+  // Half Word
+  defm : MemOpi_u5ALUOp <ldOpHalf, truncstorei16, u6_1ExtPred,
+                         MemOPh_ADDi_V4, MemOPh_SUBi_V4>;
+  // Byte
+  defm : MemOpi_u5ALUOp <ldOpByte, truncstorei8, u6ExtPred,
+                         MemOPb_ADDi_V4, MemOPb_SUBi_V4>;
+}
+
+let Predicates = [HasV4T, UseMEMOP] in {
+  defm : MemOpi_u5ExtType<zextloadi8, zextloadi16>; // zero extend
+  defm : MemOpi_u5ExtType<sextloadi8, sextloadi16>; // sign extend
+  defm : MemOpi_u5ExtType<extloadi8,  extloadi16>;  // any extend
+
+  // Word
+  defm : MemOpi_u5ALUOp <load, store, u6_2ExtPred, MemOPw_ADDi_V4,
+                         MemOPw_SUBi_V4>;
+}
+
+//===----------------------------------------------------------------------===//
+// multiclass to define 'Def Pats' for ALU operations on the memory.
+// Here value used for the ALU operation is a negative value.
+// mem[bh](Rs+#0) += #m5
+// mem[bh](Rs+#u6) += #m5
+//===----------------------------------------------------------------------===//
+
+multiclass MemOpi_m5Pats <PatFrag ldOp, PatFrag stOp, PatLeaf extPred,
+                          PatLeaf immPred, ComplexPattern addrPred,
+                          SDNodeXForm xformFunc, InstHexagon MI> {
+  let AddedComplexity = 190 in
+  def : Pat <(stOp (add (ldOp IntRegs:$addr), immPred:$subend),
+                   IntRegs:$addr),
+             (MI IntRegs:$addr, #0, (xformFunc immPred:$subend) )>;
+
+  let AddedComplexity = 195 in
+  def : Pat<(stOp (add (ldOp (add IntRegs:$base, extPred:$offset)),
+                       immPred:$subend),
+                  (add IntRegs:$base, extPred:$offset)),
+            (MI IntRegs:$base, extPred:$offset, (xformFunc immPred:$subend))>;
+}
+
+multiclass MemOpi_m5ExtType<PatFrag ldOpByte, PatFrag ldOpHalf > {
+  // Half Word
+  defm : MemOpi_m5Pats <ldOpHalf, truncstorei16, u6_1ExtPred, m5HImmPred,
+                        ADDRriU6_1, MEMOPIMM_HALF, MemOPh_SUBi_V4>;
+  // Byte
+  defm : MemOpi_m5Pats <ldOpByte, truncstorei8, u6ExtPred, m5BImmPred,
+                        ADDRriU6_0, MEMOPIMM_BYTE, MemOPb_SUBi_V4>;
+}
+
+let Predicates = [HasV4T, UseMEMOP] in {
+  defm : MemOpi_m5ExtType<zextloadi8, zextloadi16>; // zero extend
+  defm : MemOpi_m5ExtType<sextloadi8, sextloadi16>; // sign extend
+  defm : MemOpi_m5ExtType<extloadi8,  extloadi16>;  // any extend
+
+  // Word
+  defm : MemOpi_m5Pats <load, store, u6_2ExtPred, m5ImmPred,
+                          ADDRriU6_2, MEMOPIMM, MemOPw_SUBi_V4>;
+}
+
+//===----------------------------------------------------------------------===//
+// Multiclass to define 'def Pats' for bit operations on the memory.
+// mem[bhw](Rs+#0) = [clrbit|setbit](#U5)
+// mem[bhw](Rs+#u6) = [clrbit|setbit](#U5)
+//===----------------------------------------------------------------------===//
+
+multiclass MemOpi_bitPats <PatFrag ldOp, PatFrag stOp, PatLeaf immPred,
+                     PatLeaf extPred, ComplexPattern addrPred,
+                     SDNodeXForm xformFunc, InstHexagon MI, SDNode OpNode> {
+
+  // mem[bhw](Rs+#u6:[012]) = [clrbit|setbit](#U5)
+  let AddedComplexity = 250 in
+  def : Pat<(stOp (OpNode (ldOp (add IntRegs:$base, extPred:$offset)),
+                          immPred:$bitend),
+                  (add IntRegs:$base, extPred:$offset)),
+            (MI IntRegs:$base, extPred:$offset, (xformFunc immPred:$bitend))>;
+
+  // mem[bhw](Rs+#0) = [clrbit|setbit](#U5)
+  let AddedComplexity = 225 in
+  def : Pat <(stOp (OpNode (ldOp addrPred:$addr), immPred:$bitend),
+                   addrPred:$addr),
+             (MI IntRegs:$addr, #0, (xformFunc immPred:$bitend))>;
+}
+
+multiclass MemOpi_bitExtType<PatFrag ldOpByte, PatFrag ldOpHalf > {
+  // Byte - clrbit
+  defm : MemOpi_bitPats<ldOpByte, truncstorei8, Clr3ImmPred, u6ExtPred,
+                       ADDRriU6_0, CLRMEMIMM_BYTE, MemOPb_CLRBITi_V4, and>;
+  // Byte - setbit
+  defm : MemOpi_bitPats<ldOpByte, truncstorei8, Set3ImmPred,  u6ExtPred,
+                       ADDRriU6_0, SETMEMIMM_BYTE, MemOPb_SETBITi_V4, or>;
+  // Half Word - clrbit
+  defm : MemOpi_bitPats<ldOpHalf, truncstorei16, Clr4ImmPred, u6_1ExtPred,
+                       ADDRriU6_1, CLRMEMIMM_SHORT, MemOPh_CLRBITi_V4, and>;
+  // Half Word - setbit
+  defm : MemOpi_bitPats<ldOpHalf, truncstorei16, Set4ImmPred, u6_1ExtPred,
+                       ADDRriU6_1, SETMEMIMM_SHORT, MemOPh_SETBITi_V4, or>;
+}
+
+let Predicates = [HasV4T, UseMEMOP] in {
+  // mem[bh](Rs+#0) = [clrbit|setbit](#U5)
+  // mem[bh](Rs+#u6:[01]) = [clrbit|setbit](#U5)
+  defm : MemOpi_bitExtType<zextloadi8, zextloadi16>; // zero extend
+  defm : MemOpi_bitExtType<sextloadi8, sextloadi16>; // sign extend
+  defm : MemOpi_bitExtType<extloadi8,  extloadi16>;  // any extend
+
+  // memw(Rs+#0) = [clrbit|setbit](#U5)
+  // memw(Rs+#u6:2) = [clrbit|setbit](#U5)
+  defm : MemOpi_bitPats<load, store, Clr5ImmPred, u6_2ExtPred, ADDRriU6_2,
+                       CLRMEMIMM, MemOPw_CLRBITi_V4, and>;
+  defm : MemOpi_bitPats<load, store, Set5ImmPred, u6_2ExtPred, ADDRriU6_2,
+                       SETMEMIMM, MemOPw_SETBITi_V4, or>;
+}
+
+//===----------------------------------------------------------------------===//
+// Multiclass to define 'def Pats' for ALU operations on the memory
+// where addend is a register.
+// mem[bhw](Rs+#0) [+-&|]= Rt
+// mem[bhw](Rs+#U6:[012]) [+-&|]= Rt
 //===----------------------------------------------------------------------===//
-// MEMOP: Word
-//
-//  Implemented:
-//     MEMw_ADDi_indexed_V4  : memw(Rs+#u6:2)+=#U5
-//     MEMw_SUBi_indexed_V4  : memw(Rs+#u6:2)-=#U5
-//     MEMw_ADDr_indexed_V4  : memw(Rs+#u6:2)+=Rt
-//     MEMw_SUBr_indexed_V4  : memw(Rs+#u6:2)-=Rt
-//     MEMw_CLRr_indexed_V4  : memw(Rs+#u6:2)&=Rt
-//     MEMw_SETr_indexed_V4  : memw(Rs+#u6:2)|=Rt
-//     MEMw_ADDi_V4          : memw(Rs+#u6:2)+=#U5
-//     MEMw_SUBi_V4          : memw(Rs+#u6:2)-=#U5
-//     MEMw_ADDr_V4          : memw(Rs+#u6:2)+=Rt
-//     MEMw_SUBr_V4          : memw(Rs+#u6:2)-=Rt
-//     MEMw_CLRr_V4          : memw(Rs+#u6:2)&=Rt
-//     MEMw_SETr_V4          : memw(Rs+#u6:2)|=Rt
-//
-//   Not implemented:
-//     MEMw_CLRi_indexed_V4  : memw(Rs+#u6:2)=clrbit(#U5)
-//     MEMw_SETi_indexed_V4  : memw(Rs+#u6:2)=setbit(#U5)
-//     MEMw_CLRi_V4          : memw(Rs+#u6:2)=clrbit(#U5)
-//     MEMw_SETi_V4          : memw(Rs+#u6:2)=setbit(#U5)
-//===----------------------------------------------------------------------===//
-
-
-
-// memw(Rs+#u6:2) += #U5
-let AddedComplexity = 30 in
-def MEMw_ADDi_indexed_MEM_V4 : MEMInst_V4<(outs),
-            (ins IntRegs:$base, u6_2Imm:$offset, u5Imm:$addend),
-            "memw($base+#$offset) += #$addend",
-            []>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memw(Rs+#u6:2) -= #U5
-let AddedComplexity = 30 in
-def MEMw_SUBi_indexed_MEM_V4 : MEMInst_V4<(outs),
-            (ins IntRegs:$base, u6_2Imm:$offset, u5Imm:$subend),
-            "memw($base+#$offset) -= #$subend",
-            []>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memw(Rs+#u6:2) += Rt
-let AddedComplexity = 30 in
-def MEMw_ADDr_indexed_MEM_V4 : MEMInst_V4<(outs),
-            (ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$addend),
-            "memw($base+#$offset) += $addend",
-            [(store (add (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)),
-                         (i32 IntRegs:$addend)),
-                    (add (i32 IntRegs:$base), u6_2ImmPred:$offset))]>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memw(Rs+#u6:2) -= Rt
-let AddedComplexity = 30 in
-def MEMw_SUBr_indexed_MEM_V4 : MEMInst_V4<(outs),
-            (ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$subend),
-            "memw($base+#$offset) -= $subend",
-            [(store (sub (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)),
-                         (i32 IntRegs:$subend)),
-                    (add (i32 IntRegs:$base), u6_2ImmPred:$offset))]>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memw(Rs+#u6:2) &= Rt
-let AddedComplexity = 30 in
-def MEMw_ANDr_indexed_MEM_V4 : MEMInst_V4<(outs),
-            (ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$andend),
-            "memw($base+#$offset) &= $andend",
-            [(store (and (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)),
-                         (i32 IntRegs:$andend)),
-                    (add (i32 IntRegs:$base), u6_2ImmPred:$offset))]>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memw(Rs+#u6:2) |= Rt
-let AddedComplexity = 30 in
-def MEMw_ORr_indexed_MEM_V4 : MEMInst_V4<(outs),
-            (ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$orend),
-            "memw($base+#$offset) |= $orend",
-            [(store (or (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)),
-                        (i32 IntRegs:$orend)),
-                    (add (i32 IntRegs:$base), u6_2ImmPred:$offset))]>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memw(Rs+#u6:2) += #U5
-let AddedComplexity = 30 in
-def MEMw_ADDi_MEM_V4 : MEMInst_V4<(outs),
-            (ins MEMri:$addr, u5Imm:$addend),
-            "memw($addr) += $addend",
-            []>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memw(Rs+#u6:2) -= #U5
-let AddedComplexity = 30 in
-def MEMw_SUBi_MEM_V4 : MEMInst_V4<(outs),
-            (ins MEMri:$addr, u5Imm:$subend),
-            "memw($addr) -= $subend",
-            []>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memw(Rs+#u6:2) += Rt
-let AddedComplexity = 30 in
-def MEMw_ADDr_MEM_V4 : MEMInst_V4<(outs),
-            (ins MEMri:$addr, IntRegs:$addend),
-            "memw($addr) += $addend",
-            [(store (add (load ADDRriU6_2:$addr), (i32 IntRegs:$addend)),
-                    ADDRriU6_2:$addr)]>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memw(Rs+#u6:2) -= Rt
-let AddedComplexity = 30 in
-def MEMw_SUBr_MEM_V4 : MEMInst_V4<(outs),
-            (ins MEMri:$addr, IntRegs:$subend),
-            "memw($addr) -= $subend",
-            [(store (sub (load ADDRriU6_2:$addr), (i32 IntRegs:$subend)),
-                    ADDRriU6_2:$addr)]>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memw(Rs+#u6:2) &= Rt
-let AddedComplexity = 30 in
-def MEMw_ANDr_MEM_V4 : MEMInst_V4<(outs),
-            (ins MEMri:$addr, IntRegs:$andend),
-            "memw($addr) &= $andend",
-            [(store (and (load ADDRriU6_2:$addr), (i32 IntRegs:$andend)),
-                    ADDRriU6_2:$addr)]>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memw(Rs+#u6:2) |= Rt
-let AddedComplexity = 30 in
-def MEMw_ORr_MEM_V4 : MEMInst_V4<(outs),
-            (ins MEMri:$addr, IntRegs:$orend),
-            "memw($addr) |= $orend",
-            [(store (or (load ADDRriU6_2:$addr), (i32 IntRegs:$orend)),
-                    ADDRriU6_2:$addr)]>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-//===----------------------------------------------------------------------===//
-// MEMOP: Halfword
-//
-//  Implemented:
-//     MEMh_ADDi_indexed_V4  : memw(Rs+#u6:2)+=#U5
-//     MEMh_SUBi_indexed_V4  : memw(Rs+#u6:2)-=#U5
-//     MEMh_ADDr_indexed_V4  : memw(Rs+#u6:2)+=Rt
-//     MEMh_SUBr_indexed_V4  : memw(Rs+#u6:2)-=Rt
-//     MEMh_CLRr_indexed_V4  : memw(Rs+#u6:2)&=Rt
-//     MEMh_SETr_indexed_V4  : memw(Rs+#u6:2)|=Rt
-//     MEMh_ADDi_V4          : memw(Rs+#u6:2)+=#U5
-//     MEMh_SUBi_V4          : memw(Rs+#u6:2)-=#U5
-//     MEMh_ADDr_V4          : memw(Rs+#u6:2)+=Rt
-//     MEMh_SUBr_V4          : memw(Rs+#u6:2)-=Rt
-//     MEMh_CLRr_V4          : memw(Rs+#u6:2)&=Rt
-//     MEMh_SETr_V4          : memw(Rs+#u6:2)|=Rt
-//
-//   Not implemented:
-//     MEMh_CLRi_indexed_V4  : memw(Rs+#u6:2)=clrbit(#U5)
-//     MEMh_SETi_indexed_V4  : memw(Rs+#u6:2)=setbit(#U5)
-//     MEMh_CLRi_V4          : memw(Rs+#u6:2)=clrbit(#U5)
-//     MEMh_SETi_V4          : memw(Rs+#u6:2)=setbit(#U5)
-//===----------------------------------------------------------------------===//
-
-
-// memh(Rs+#u6:1) += #U5
-let AddedComplexity = 30 in
-def MEMh_ADDi_indexed_MEM_V4 : MEMInst_V4<(outs),
-            (ins IntRegs:$base, u6_1Imm:$offset, u5Imm:$addend),
-            "memh($base+#$offset) += $addend",
-            []>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memh(Rs+#u6:1) -= #U5
-let AddedComplexity = 30 in
-def MEMh_SUBi_indexed_MEM_V4 : MEMInst_V4<(outs),
-            (ins IntRegs:$base, u6_1Imm:$offset, u5Imm:$subend),
-            "memh($base+#$offset) -= $subend",
-            []>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memh(Rs+#u6:1) += Rt
-let AddedComplexity = 30 in
-def MEMh_ADDr_indexed_MEM_V4 : MEMInst_V4<(outs),
-            (ins IntRegs:$base, u6_1Imm:$offset, IntRegs:$addend),
-            "memh($base+#$offset) += $addend",
-            [(truncstorei16 (add (sextloadi16 (add (i32 IntRegs:$base),
-                                                   u6_1ImmPred:$offset)),
-                                 (i32 IntRegs:$addend)),
-                            (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memh(Rs+#u6:1) -= Rt
-let AddedComplexity = 30 in
-def MEMh_SUBr_indexed_MEM_V4 : MEMInst_V4<(outs),
-            (ins IntRegs:$base, u6_1Imm:$offset, IntRegs:$subend),
-            "memh($base+#$offset) -= $subend",
-            [(truncstorei16 (sub (sextloadi16 (add (i32 IntRegs:$base),
-                                                   u6_1ImmPred:$offset)),
-                                 (i32 IntRegs:$subend)),
-                            (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memh(Rs+#u6:1) &= Rt
-let AddedComplexity = 30 in
-def MEMh_ANDr_indexed_MEM_V4 : MEMInst_V4<(outs),
-            (ins IntRegs:$base, u6_1Imm:$offset, IntRegs:$andend),
-            "memh($base+#$offset) += $andend",
-            [(truncstorei16 (and (sextloadi16 (add (i32 IntRegs:$base),
-                                                   u6_1ImmPred:$offset)),
-                                 (i32 IntRegs:$andend)),
-                            (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memh(Rs+#u6:1) |= Rt
-let AddedComplexity = 30 in
-def MEMh_ORr_indexed_MEM_V4 : MEMInst_V4<(outs),
-            (ins IntRegs:$base, u6_1Imm:$offset, IntRegs:$orend),
-            "memh($base+#$offset) |= $orend",
-            [(truncstorei16 (or (sextloadi16 (add (i32 IntRegs:$base),
-                                              u6_1ImmPred:$offset)),
-                             (i32 IntRegs:$orend)),
-                            (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memh(Rs+#u6:1) += #U5
-let AddedComplexity = 30 in
-def MEMh_ADDi_MEM_V4 : MEMInst_V4<(outs),
-            (ins MEMri:$addr, u5Imm:$addend),
-            "memh($addr) += $addend",
-            []>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memh(Rs+#u6:1) -= #U5
-let AddedComplexity = 30 in
-def MEMh_SUBi_MEM_V4 : MEMInst_V4<(outs),
-            (ins MEMri:$addr, u5Imm:$subend),
-            "memh($addr) -= $subend",
-            []>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memh(Rs+#u6:1) += Rt
-let AddedComplexity = 30 in
-def MEMh_ADDr_MEM_V4 : MEMInst_V4<(outs),
-            (ins MEMri:$addr, IntRegs:$addend),
-            "memh($addr) += $addend",
-            [(truncstorei16 (add (sextloadi16 ADDRriU6_1:$addr),
-                                 (i32 IntRegs:$addend)), ADDRriU6_1:$addr)]>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memh(Rs+#u6:1) -= Rt
-let AddedComplexity = 30 in
-def MEMh_SUBr_MEM_V4 : MEMInst_V4<(outs),
-            (ins MEMri:$addr, IntRegs:$subend),
-            "memh($addr) -= $subend",
-            [(truncstorei16 (sub (sextloadi16 ADDRriU6_1:$addr),
-                                 (i32 IntRegs:$subend)), ADDRriU6_1:$addr)]>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memh(Rs+#u6:1) &= Rt
-let AddedComplexity = 30 in
-def MEMh_ANDr_MEM_V4 : MEMInst_V4<(outs),
-            (ins MEMri:$addr, IntRegs:$andend),
-            "memh($addr) &= $andend",
-            [(truncstorei16 (and (sextloadi16 ADDRriU6_1:$addr),
-                                 (i32 IntRegs:$andend)), ADDRriU6_1:$addr)]>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memh(Rs+#u6:1) |= Rt
-let AddedComplexity = 30 in
-def MEMh_ORr_MEM_V4 : MEMInst_V4<(outs),
-            (ins MEMri:$addr, IntRegs:$orend),
-            "memh($addr) |= $orend",
-            [(truncstorei16 (or (sextloadi16 ADDRriU6_1:$addr),
-                                (i32 IntRegs:$orend)), ADDRriU6_1:$addr)]>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-
-//===----------------------------------------------------------------------===//
-// MEMOP: Byte
-//
-//  Implemented:
-//     MEMb_ADDi_indexed_V4  : memb(Rs+#u6:0)+=#U5
-//     MEMb_SUBi_indexed_V4  : memb(Rs+#u6:0)-=#U5
-//     MEMb_ADDr_indexed_V4  : memb(Rs+#u6:0)+=Rt
-//     MEMb_SUBr_indexed_V4  : memb(Rs+#u6:0)-=Rt
-//     MEMb_CLRr_indexed_V4  : memb(Rs+#u6:0)&=Rt
-//     MEMb_SETr_indexed_V4  : memb(Rs+#u6:0)|=Rt
-//     MEMb_ADDi_V4          : memb(Rs+#u6:0)+=#U5
-//     MEMb_SUBi_V4          : memb(Rs+#u6:0)-=#U5
-//     MEMb_ADDr_V4          : memb(Rs+#u6:0)+=Rt
-//     MEMb_SUBr_V4          : memb(Rs+#u6:0)-=Rt
-//     MEMb_CLRr_V4          : memb(Rs+#u6:0)&=Rt
-//     MEMb_SETr_V4          : memb(Rs+#u6:0)|=Rt
-//
-//   Not implemented:
-//     MEMb_CLRi_indexed_V4  : memb(Rs+#u6:0)=clrbit(#U5)
-//     MEMb_SETi_indexed_V4  : memb(Rs+#u6:0)=setbit(#U5)
-//     MEMb_CLRi_V4          : memb(Rs+#u6:0)=clrbit(#U5)
-//     MEMb_SETi_V4          : memb(Rs+#u6:0)=setbit(#U5)
-//===----------------------------------------------------------------------===//
-
-// memb(Rs+#u6:0) += #U5
-let AddedComplexity = 30 in
-def MEMb_ADDi_indexed_MEM_V4 : MEMInst_V4<(outs),
-            (ins IntRegs:$base, u6_0Imm:$offset, u5Imm:$addend),
-            "memb($base+#$offset) += $addend",
-            []>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memb(Rs+#u6:0) -= #U5
-let AddedComplexity = 30 in
-def MEMb_SUBi_indexed_MEM_V4 : MEMInst_V4<(outs),
-            (ins IntRegs:$base, u6_0Imm:$offset, u5Imm:$subend),
-            "memb($base+#$offset) -= $subend",
-            []>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memb(Rs+#u6:0) += Rt
-let AddedComplexity = 30 in
-def MEMb_ADDr_indexed_MEM_V4 : MEMInst_V4<(outs),
-            (ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$addend),
-            "memb($base+#$offset) += $addend",
-            [(truncstorei8 (add (sextloadi8 (add (i32 IntRegs:$base),
-                                                 u6_0ImmPred:$offset)),
-                                (i32 IntRegs:$addend)),
-                           (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memb(Rs+#u6:0) -= Rt
-let AddedComplexity = 30 in
-def MEMb_SUBr_indexed_MEM_V4 : MEMInst_V4<(outs),
-            (ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$subend),
-            "memb($base+#$offset) -= $subend",
-            [(truncstorei8 (sub (sextloadi8 (add (i32 IntRegs:$base),
-                                                 u6_0ImmPred:$offset)),
-                                (i32 IntRegs:$subend)),
-                           (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memb(Rs+#u6:0) &= Rt
-let AddedComplexity = 30 in
-def MEMb_ANDr_indexed_MEM_V4 : MEMInst_V4<(outs),
-            (ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$andend),
-            "memb($base+#$offset) += $andend",
-            [(truncstorei8 (and (sextloadi8 (add (i32 IntRegs:$base),
-                                                 u6_0ImmPred:$offset)),
-                                (i32 IntRegs:$andend)),
-                           (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memb(Rs+#u6:0) |= Rt
-let AddedComplexity = 30 in
-def MEMb_ORr_indexed_MEM_V4 : MEMInst_V4<(outs),
-            (ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$orend),
-            "memb($base+#$offset) |= $orend",
-            [(truncstorei8 (or (sextloadi8 (add (i32 IntRegs:$base),
-                                                u6_0ImmPred:$offset)),
-                               (i32 IntRegs:$orend)),
-                           (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memb(Rs+#u6:0) += #U5
-let AddedComplexity = 30 in
-def MEMb_ADDi_MEM_V4 : MEMInst_V4<(outs),
-            (ins MEMri:$addr, u5Imm:$addend),
-            "memb($addr) += $addend",
-            []>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memb(Rs+#u6:0) -= #U5
-let AddedComplexity = 30 in
-def MEMb_SUBi_MEM_V4 : MEMInst_V4<(outs),
-            (ins MEMri:$addr, u5Imm:$subend),
-            "memb($addr) -= $subend",
-            []>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memb(Rs+#u6:0) += Rt
-let AddedComplexity = 30 in
-def MEMb_ADDr_MEM_V4 : MEMInst_V4<(outs),
-            (ins MEMri:$addr, IntRegs:$addend),
-            "memb($addr) += $addend",
-            [(truncstorei8 (add (sextloadi8 ADDRriU6_0:$addr),
-                                (i32 IntRegs:$addend)), ADDRriU6_0:$addr)]>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memb(Rs+#u6:0) -= Rt
-let AddedComplexity = 30 in
-def MEMb_SUBr_MEM_V4 : MEMInst_V4<(outs),
-            (ins MEMri:$addr, IntRegs:$subend),
-            "memb($addr) -= $subend",
-            [(truncstorei8 (sub (sextloadi8 ADDRriU6_0:$addr),
-                                (i32 IntRegs:$subend)), ADDRriU6_0:$addr)]>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memb(Rs+#u6:0) &= Rt
-let AddedComplexity = 30 in
-def MEMb_ANDr_MEM_V4 : MEMInst_V4<(outs),
-            (ins MEMri:$addr, IntRegs:$andend),
-            "memb($addr) &= $andend",
-            [(truncstorei8 (and (sextloadi8 ADDRriU6_0:$addr),
-                                (i32 IntRegs:$andend)), ADDRriU6_0:$addr)]>,
-            Requires<[HasV4T, UseMEMOP]>;
-
-// memb(Rs+#u6:0) |= Rt
-let AddedComplexity = 30 in
-def MEMb_ORr_MEM_V4 : MEMInst_V4<(outs),
-            (ins MEMri:$addr, IntRegs:$orend),
-            "memb($addr) |= $orend",
-            [(truncstorei8 (or (sextloadi8 ADDRriU6_0:$addr),
-                               (i32 IntRegs:$orend)), ADDRriU6_0:$addr)]>,
-            Requires<[HasV4T, UseMEMOP]>;
 
+multiclass MemOpr_Pats <PatFrag ldOp, PatFrag stOp, ComplexPattern addrPred,
+                     PatLeaf extPred, InstHexagon MI, SDNode OpNode> {
+  let AddedComplexity = 141 in
+  // mem[bhw](Rs+#0) [+-&|]= Rt
+  def : Pat <(stOp (OpNode (ldOp addrPred:$addr), (i32 IntRegs:$addend)),
+                   addrPred:$addr),
+             (MI IntRegs:$addr, #0, (i32 IntRegs:$addend) )>;
+
+  // mem[bhw](Rs+#U6:[012]) [+-&|]= Rt
+  let AddedComplexity = 150 in
+  def : Pat <(stOp (OpNode (ldOp (add IntRegs:$base, extPred:$offset)),
+                           (i32 IntRegs:$orend)),
+                   (add IntRegs:$base, extPred:$offset)),
+             (MI IntRegs:$base, extPred:$offset, (i32 IntRegs:$orend) )>;
+}
+
+multiclass MemOPr_ALUOp<PatFrag ldOp, PatFrag stOp,
+                        ComplexPattern addrPred, PatLeaf extPred,
+                        InstHexagon addMI, InstHexagon subMI,
+                        InstHexagon andMI, InstHexagon orMI > {
+
+  defm : MemOpr_Pats <ldOp, stOp, addrPred, extPred, addMI, add>;
+  defm : MemOpr_Pats <ldOp, stOp, addrPred, extPred, subMI, sub>;
+  defm : MemOpr_Pats <ldOp, stOp, addrPred, extPred, andMI, and>;
+  defm : MemOpr_Pats <ldOp, stOp, addrPred, extPred, orMI,  or>;
+}
+
+multiclass MemOPr_ExtType<PatFrag ldOpByte, PatFrag ldOpHalf > {
+  // Half Word
+  defm : MemOPr_ALUOp <ldOpHalf, truncstorei16, ADDRriU6_1, u6_1ExtPred,
+                       MemOPh_ADDr_V4, MemOPh_SUBr_V4,
+                       MemOPh_ANDr_V4, MemOPh_ORr_V4>;
+  // Byte
+  defm : MemOPr_ALUOp <ldOpByte, truncstorei8, ADDRriU6_0, u6ExtPred,
+                       MemOPb_ADDr_V4, MemOPb_SUBr_V4,
+                       MemOPb_ANDr_V4, MemOPb_ORr_V4>;
+}
+
+// Define 'def Pats' for MemOps with register addend.
+let Predicates = [HasV4T, UseMEMOP] in {
+  // Byte, Half Word
+  defm : MemOPr_ExtType<zextloadi8, zextloadi16>; // zero extend
+  defm : MemOPr_ExtType<sextloadi8, sextloadi16>; // sign extend
+  defm : MemOPr_ExtType<extloadi8,  extloadi16>;  // any extend
+  // Word
+  defm : MemOPr_ALUOp <load, store, ADDRriU6_2, u6_2ExtPred, MemOPw_ADDr_V4,
+                       MemOPw_SUBr_V4, MemOPw_ANDr_V4, MemOPw_ORr_V4 >;
+}
 
 //===----------------------------------------------------------------------===//
 // XTYPE/PRED +

Modified: llvm/trunk/lib/Target/Hexagon/HexagonRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonRegisterInfo.cpp?rev=177747&r1=177746&r2=177747&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonRegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonRegisterInfo.cpp Fri Mar 22 13:41:34 2013
@@ -14,25 +14,26 @@
 
 #include "HexagonRegisterInfo.h"
 #include "Hexagon.h"
-#include "HexagonMachineFunctionInfo.h"
 #include "HexagonSubtarget.h"
 #include "HexagonTargetMachine.h"
+#include "HexagonMachineFunctionInfo.h"
 #include "llvm/ADT/BitVector.h"
 #include "llvm/ADT/STLExtras.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
 #include "llvm/CodeGen/RegisterScavenging.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/Type.h"
 #include "llvm/MC/MachineLocation.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/ErrorHandling.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetOptions.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
 
 using namespace llvm;
 
@@ -215,28 +216,41 @@ void HexagonRegisterInfo::eliminateFrame
         MI.getOperand(FIOperandNum).ChangeToRegister(resReg, false, false,true);
         MI.getOperand(FIOperandNum+1).ChangeToImmediate(0);
       } else if (TII.isMemOp(&MI)) {
-        unsigned resReg = HEXAGON_RESERVED_REG_1;
-        if (!MFI.hasVarSizedObjects() &&
-            TII.isValidOffset(MI.getOpcode(), (FrameSize+Offset))) {
-          MI.getOperand(FIOperandNum).ChangeToRegister(getStackRegister(),
-                                                       false, false, true);
-          MI.getOperand(FIOperandNum+1).ChangeToImmediate(FrameSize+Offset);
-        } else if (!TII.isValidOffset(Hexagon::ADD_ri, Offset)) {
-          BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
-                  TII.get(Hexagon::CONST32_Int_Real), resReg).addImm(Offset);
-          BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
-                  TII.get(Hexagon::ADD_rr),
-                  resReg).addReg(FrameReg).addReg(resReg);
-          MI.getOperand(FIOperandNum).ChangeToRegister(resReg, false, false,
-                                                       true);
-          MI.getOperand(FIOperandNum+1).ChangeToImmediate(0);
+        // Use the constant extender if the instruction provides it
+        // and the target is V4 or later.
+        if (Subtarget.hasV4TOps()) {
+          if (TII.isConstExtended(&MI)) {
+            MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false);
+            MI.getOperand(FIOperandNum+1).ChangeToImmediate(Offset);
+            TII.immediateExtend(&MI);
+          } else {
+            llvm_unreachable("Need to implement for memops");
+          }
         } else {
-          BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
-                  TII.get(Hexagon::ADD_ri),
-                  resReg).addReg(FrameReg).addImm(Offset);
-          MI.getOperand(FIOperandNum).ChangeToRegister(resReg, false, false,
-                                                       true);
-          MI.getOperand(FIOperandNum+1).ChangeToImmediate(0);
+          // Only V3 and older instructions here.
+          unsigned ResReg = HEXAGON_RESERVED_REG_1;
+          if (!MFI.hasVarSizedObjects() &&
+              TII.isValidOffset(MI.getOpcode(), (FrameSize+Offset))) {
+            MI.getOperand(FIOperandNum).ChangeToRegister(getStackRegister(),
+                                                         false, false, false);
+            MI.getOperand(FIOperandNum+1).ChangeToImmediate(FrameSize+Offset);
+          } else if (!TII.isValidOffset(Hexagon::ADD_ri, Offset)) {
+            BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
+                    TII.get(Hexagon::CONST32_Int_Real), ResReg).addImm(Offset);
+            BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
+                    TII.get(Hexagon::ADD_rr), ResReg).addReg(FrameReg).
+              addReg(ResReg);
+            MI.getOperand(FIOperandNum).ChangeToRegister(ResReg, false, false,
+                                                         true);
+            MI.getOperand(FIOperandNum+1).ChangeToImmediate(0);
+          } else {
+            BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
+                    TII.get(Hexagon::ADD_ri), ResReg).addReg(FrameReg).
+              addImm(Offset);
+            MI.getOperand(FIOperandNum).ChangeToRegister(ResReg, false, false,
+                                                         true);
+            MI.getOperand(FIOperandNum+1).ChangeToImmediate(0);
+          }
         }
       } else {
         unsigned dstReg = MI.getOperand(0).getReg();

Modified: llvm/trunk/lib/Target/Hexagon/HexagonSubtarget.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonSubtarget.cpp?rev=177747&r1=177746&r2=177747&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonSubtarget.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonSubtarget.cpp Fri Mar 22 13:41:34 2013
@@ -29,8 +29,16 @@ EnableV3("enable-hexagon-v3", cl::Hidden
 static cl::opt<bool>
 EnableMemOps(
     "enable-hexagon-memops",
-    cl::Hidden, cl::ZeroOrMore, cl::ValueDisallowed,
-    cl::desc("Generate V4 memop instructions."));
+    cl::Hidden, cl::ZeroOrMore, cl::ValueDisallowed, cl::init(true),
+    cl::desc(
+      "Generate V4 MEMOP in code generation for Hexagon target"));
+
+static cl::opt<bool>
+DisableMemOps(
+    "disable-hexagon-memops",
+    cl::Hidden, cl::ZeroOrMore, cl::ValueDisallowed, cl::init(false),
+    cl::desc(
+      "Do not generate V4 MEMOP in code generation for Hexagon target"));
 
 static cl::opt<bool>
 EnableIEEERndNear(
@@ -64,7 +72,10 @@ HexagonSubtarget::HexagonSubtarget(Strin
   // Initialize scheduling itinerary for the specified CPU.
   InstrItins = getInstrItineraryForCPU(CPUString);
 
-  if (EnableMemOps)
+  // UseMemOps is on by default unless disabled explicitly.
+  if (DisableMemOps)
+    UseMemOps = false;
+  else if (EnableMemOps)
     UseMemOps = true;
   else
     UseMemOps = false;

Added: llvm/trunk/test/CodeGen/Hexagon/memops.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/memops.ll?rev=177747&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/memops.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/memops.ll Fri Mar 22 13:41:34 2013
@@ -0,0 +1,1369 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv5  < %s | FileCheck %s
+; Generate MemOps for V4 and above.
+
+define void @memop_unsigned_char_add5(i8* nocapture %p) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
+  %0 = load i8* %p, align 1, !tbaa !0
+  %conv = zext i8 %0 to i32
+  %add = add nsw i32 %conv, 5
+  %conv1 = trunc i32 %add to i8
+  store i8 %conv1, i8* %p, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_unsigned_char_add(i8* nocapture %p, i8 zeroext %x) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
+  %conv = zext i8 %x to i32
+  %0 = load i8* %p, align 1, !tbaa !0
+  %conv1 = zext i8 %0 to i32
+  %add = add nsw i32 %conv1, %conv
+  %conv2 = trunc i32 %add to i8
+  store i8 %conv2, i8* %p, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_unsigned_char_sub(i8* nocapture %p, i8 zeroext %x) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
+  %conv = zext i8 %x to i32
+  %0 = load i8* %p, align 1, !tbaa !0
+  %conv1 = zext i8 %0 to i32
+  %sub = sub nsw i32 %conv1, %conv
+  %conv2 = trunc i32 %sub to i8
+  store i8 %conv2, i8* %p, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_unsigned_char_or(i8* nocapture %p, i8 zeroext %x) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
+  %0 = load i8* %p, align 1, !tbaa !0
+  %or3 = or i8 %0, %x
+  store i8 %or3, i8* %p, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_unsigned_char_and(i8* nocapture %p, i8 zeroext %x) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
+  %0 = load i8* %p, align 1, !tbaa !0
+  %and3 = and i8 %0, %x
+  store i8 %and3, i8* %p, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_unsigned_char_clrbit(i8* nocapture %p) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+  %0 = load i8* %p, align 1, !tbaa !0
+  %conv = zext i8 %0 to i32
+  %and = and i32 %conv, 223
+  %conv1 = trunc i32 %and to i8
+  store i8 %conv1, i8* %p, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_unsigned_char_setbit(i8* nocapture %p) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+  %0 = load i8* %p, align 1, !tbaa !0
+  %conv = zext i8 %0 to i32
+  %or = or i32 %conv, 128
+  %conv1 = trunc i32 %or to i8
+  store i8 %conv1, i8* %p, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_unsigned_char_add5_index(i8* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
+  %add.ptr = getelementptr inbounds i8* %p, i32 %i
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %conv = zext i8 %0 to i32
+  %add = add nsw i32 %conv, 5
+  %conv1 = trunc i32 %add to i8
+  store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_unsigned_char_add_index(i8* nocapture %p, i32 %i, i8 zeroext %x) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
+  %conv = zext i8 %x to i32
+  %add.ptr = getelementptr inbounds i8* %p, i32 %i
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %conv1 = zext i8 %0 to i32
+  %add = add nsw i32 %conv1, %conv
+  %conv2 = trunc i32 %add to i8
+  store i8 %conv2, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_unsigned_char_sub_index(i8* nocapture %p, i32 %i, i8 zeroext %x) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
+  %conv = zext i8 %x to i32
+  %add.ptr = getelementptr inbounds i8* %p, i32 %i
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %conv1 = zext i8 %0 to i32
+  %sub = sub nsw i32 %conv1, %conv
+  %conv2 = trunc i32 %sub to i8
+  store i8 %conv2, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_unsigned_char_or_index(i8* nocapture %p, i32 %i, i8 zeroext %x) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i8* %p, i32 %i
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %or3 = or i8 %0, %x
+  store i8 %or3, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_unsigned_char_and_index(i8* nocapture %p, i32 %i, i8 zeroext %x) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i8* %p, i32 %i
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %and3 = and i8 %0, %x
+  store i8 %and3, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_unsigned_char_clrbit_index(i8* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+  %add.ptr = getelementptr inbounds i8* %p, i32 %i
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %conv = zext i8 %0 to i32
+  %and = and i32 %conv, 223
+  %conv1 = trunc i32 %and to i8
+  store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_unsigned_char_setbit_index(i8* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+  %add.ptr = getelementptr inbounds i8* %p, i32 %i
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %conv = zext i8 %0 to i32
+  %or = or i32 %conv, 128
+  %conv1 = trunc i32 %or to i8
+  store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_unsigned_char_add5_index5(i8* nocapture %p) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}#5
+  %add.ptr = getelementptr inbounds i8* %p, i32 5
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %conv = zext i8 %0 to i32
+  %add = add nsw i32 %conv, 5
+  %conv1 = trunc i32 %add to i8
+  store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_unsigned_char_add_index5(i8* nocapture %p, i8 zeroext %x) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}r{{[0-9]+}}
+  %conv = zext i8 %x to i32
+  %add.ptr = getelementptr inbounds i8* %p, i32 5
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %conv1 = zext i8 %0 to i32
+  %add = add nsw i32 %conv1, %conv
+  %conv2 = trunc i32 %add to i8
+  store i8 %conv2, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_unsigned_char_sub_index5(i8* nocapture %p, i8 zeroext %x) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}-={{ *}}r{{[0-9]+}}
+  %conv = zext i8 %x to i32
+  %add.ptr = getelementptr inbounds i8* %p, i32 5
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %conv1 = zext i8 %0 to i32
+  %sub = sub nsw i32 %conv1, %conv
+  %conv2 = trunc i32 %sub to i8
+  store i8 %conv2, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_unsigned_char_or_index5(i8* nocapture %p, i8 zeroext %x) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}|={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i8* %p, i32 5
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %or3 = or i8 %0, %x
+  store i8 %or3, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_unsigned_char_and_index5(i8* nocapture %p, i8 zeroext %x) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}&={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i8* %p, i32 5
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %and3 = and i8 %0, %x
+  store i8 %and3, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_unsigned_char_clrbit_index5(i8* nocapture %p) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+  %add.ptr = getelementptr inbounds i8* %p, i32 5
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %conv = zext i8 %0 to i32
+  %and = and i32 %conv, 223
+  %conv1 = trunc i32 %and to i8
+  store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_unsigned_char_setbit_index5(i8* nocapture %p) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+  %add.ptr = getelementptr inbounds i8* %p, i32 5
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %conv = zext i8 %0 to i32
+  %or = or i32 %conv, 128
+  %conv1 = trunc i32 %or to i8
+  store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_signed_char_add5(i8* nocapture %p) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
+  %0 = load i8* %p, align 1, !tbaa !0
+  %conv2 = zext i8 %0 to i32
+  %add = add nsw i32 %conv2, 5
+  %conv1 = trunc i32 %add to i8
+  store i8 %conv1, i8* %p, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_signed_char_add(i8* nocapture %p, i8 signext %x) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
+  %conv4 = zext i8 %x to i32
+  %0 = load i8* %p, align 1, !tbaa !0
+  %conv13 = zext i8 %0 to i32
+  %add = add nsw i32 %conv13, %conv4
+  %conv2 = trunc i32 %add to i8
+  store i8 %conv2, i8* %p, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_signed_char_sub(i8* nocapture %p, i8 signext %x) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
+  %conv4 = zext i8 %x to i32
+  %0 = load i8* %p, align 1, !tbaa !0
+  %conv13 = zext i8 %0 to i32
+  %sub = sub nsw i32 %conv13, %conv4
+  %conv2 = trunc i32 %sub to i8
+  store i8 %conv2, i8* %p, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_signed_char_or(i8* nocapture %p, i8 signext %x) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
+  %0 = load i8* %p, align 1, !tbaa !0
+  %or3 = or i8 %0, %x
+  store i8 %or3, i8* %p, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_signed_char_and(i8* nocapture %p, i8 signext %x) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
+  %0 = load i8* %p, align 1, !tbaa !0
+  %and3 = and i8 %0, %x
+  store i8 %and3, i8* %p, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_signed_char_clrbit(i8* nocapture %p) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+  %0 = load i8* %p, align 1, !tbaa !0
+  %conv2 = zext i8 %0 to i32
+  %and = and i32 %conv2, 223
+  %conv1 = trunc i32 %and to i8
+  store i8 %conv1, i8* %p, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_signed_char_setbit(i8* nocapture %p) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+  %0 = load i8* %p, align 1, !tbaa !0
+  %conv2 = zext i8 %0 to i32
+  %or = or i32 %conv2, 128
+  %conv1 = trunc i32 %or to i8
+  store i8 %conv1, i8* %p, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_signed_char_add5_index(i8* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
+  %add.ptr = getelementptr inbounds i8* %p, i32 %i
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %conv2 = zext i8 %0 to i32
+  %add = add nsw i32 %conv2, 5
+  %conv1 = trunc i32 %add to i8
+  store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_signed_char_add_index(i8* nocapture %p, i32 %i, i8 signext %x) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
+  %conv4 = zext i8 %x to i32
+  %add.ptr = getelementptr inbounds i8* %p, i32 %i
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %conv13 = zext i8 %0 to i32
+  %add = add nsw i32 %conv13, %conv4
+  %conv2 = trunc i32 %add to i8
+  store i8 %conv2, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_signed_char_sub_index(i8* nocapture %p, i32 %i, i8 signext %x) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
+  %conv4 = zext i8 %x to i32
+  %add.ptr = getelementptr inbounds i8* %p, i32 %i
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %conv13 = zext i8 %0 to i32
+  %sub = sub nsw i32 %conv13, %conv4
+  %conv2 = trunc i32 %sub to i8
+  store i8 %conv2, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_signed_char_or_index(i8* nocapture %p, i32 %i, i8 signext %x) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i8* %p, i32 %i
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %or3 = or i8 %0, %x
+  store i8 %or3, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_signed_char_and_index(i8* nocapture %p, i32 %i, i8 signext %x) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i8* %p, i32 %i
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %and3 = and i8 %0, %x
+  store i8 %and3, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_signed_char_clrbit_index(i8* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+  %add.ptr = getelementptr inbounds i8* %p, i32 %i
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %conv2 = zext i8 %0 to i32
+  %and = and i32 %conv2, 223
+  %conv1 = trunc i32 %and to i8
+  store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_signed_char_setbit_index(i8* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+  %add.ptr = getelementptr inbounds i8* %p, i32 %i
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %conv2 = zext i8 %0 to i32
+  %or = or i32 %conv2, 128
+  %conv1 = trunc i32 %or to i8
+  store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_signed_char_add5_index5(i8* nocapture %p) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}#5
+  %add.ptr = getelementptr inbounds i8* %p, i32 5
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %conv2 = zext i8 %0 to i32
+  %add = add nsw i32 %conv2, 5
+  %conv1 = trunc i32 %add to i8
+  store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_signed_char_add_index5(i8* nocapture %p, i8 signext %x) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}r{{[0-9]+}}
+  %conv4 = zext i8 %x to i32
+  %add.ptr = getelementptr inbounds i8* %p, i32 5
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %conv13 = zext i8 %0 to i32
+  %add = add nsw i32 %conv13, %conv4
+  %conv2 = trunc i32 %add to i8
+  store i8 %conv2, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_signed_char_sub_index5(i8* nocapture %p, i8 signext %x) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}-={{ *}}r{{[0-9]+}}
+  %conv4 = zext i8 %x to i32
+  %add.ptr = getelementptr inbounds i8* %p, i32 5
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %conv13 = zext i8 %0 to i32
+  %sub = sub nsw i32 %conv13, %conv4
+  %conv2 = trunc i32 %sub to i8
+  store i8 %conv2, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_signed_char_or_index5(i8* nocapture %p, i8 signext %x) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}|={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i8* %p, i32 5
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %or3 = or i8 %0, %x
+  store i8 %or3, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_signed_char_and_index5(i8* nocapture %p, i8 signext %x) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}&={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i8* %p, i32 5
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %and3 = and i8 %0, %x
+  store i8 %and3, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_signed_char_clrbit_index5(i8* nocapture %p) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+  %add.ptr = getelementptr inbounds i8* %p, i32 5
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %conv2 = zext i8 %0 to i32
+  %and = and i32 %conv2, 223
+  %conv1 = trunc i32 %and to i8
+  store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_signed_char_setbit_index5(i8* nocapture %p) nounwind {
+entry:
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+  %add.ptr = getelementptr inbounds i8* %p, i32 5
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %conv2 = zext i8 %0 to i32
+  %or = or i32 %conv2, 128
+  %conv1 = trunc i32 %or to i8
+  store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @memop_unsigned_short_add5(i16* nocapture %p) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
+  %0 = load i16* %p, align 2, !tbaa !2
+  %conv = zext i16 %0 to i32
+  %add = add nsw i32 %conv, 5
+  %conv1 = trunc i32 %add to i16
+  store i16 %conv1, i16* %p, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_unsigned_short_add(i16* nocapture %p, i16 zeroext %x) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
+  %conv = zext i16 %x to i32
+  %0 = load i16* %p, align 2, !tbaa !2
+  %conv1 = zext i16 %0 to i32
+  %add = add nsw i32 %conv1, %conv
+  %conv2 = trunc i32 %add to i16
+  store i16 %conv2, i16* %p, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_unsigned_short_sub(i16* nocapture %p, i16 zeroext %x) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
+  %conv = zext i16 %x to i32
+  %0 = load i16* %p, align 2, !tbaa !2
+  %conv1 = zext i16 %0 to i32
+  %sub = sub nsw i32 %conv1, %conv
+  %conv2 = trunc i32 %sub to i16
+  store i16 %conv2, i16* %p, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_unsigned_short_or(i16* nocapture %p, i16 zeroext %x) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
+  %0 = load i16* %p, align 2, !tbaa !2
+  %or3 = or i16 %0, %x
+  store i16 %or3, i16* %p, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_unsigned_short_and(i16* nocapture %p, i16 zeroext %x) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
+  %0 = load i16* %p, align 2, !tbaa !2
+  %and3 = and i16 %0, %x
+  store i16 %and3, i16* %p, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_unsigned_short_clrbit(i16* nocapture %p) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+  %0 = load i16* %p, align 2, !tbaa !2
+  %conv = zext i16 %0 to i32
+  %and = and i32 %conv, 65503
+  %conv1 = trunc i32 %and to i16
+  store i16 %conv1, i16* %p, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_unsigned_short_setbit(i16* nocapture %p) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+  %0 = load i16* %p, align 2, !tbaa !2
+  %conv = zext i16 %0 to i32
+  %or = or i32 %conv, 128
+  %conv1 = trunc i32 %or to i16
+  store i16 %conv1, i16* %p, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_unsigned_short_add5_index(i16* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
+  %add.ptr = getelementptr inbounds i16* %p, i32 %i
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %conv = zext i16 %0 to i32
+  %add = add nsw i32 %conv, 5
+  %conv1 = trunc i32 %add to i16
+  store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_unsigned_short_add_index(i16* nocapture %p, i32 %i, i16 zeroext %x) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
+  %conv = zext i16 %x to i32
+  %add.ptr = getelementptr inbounds i16* %p, i32 %i
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %conv1 = zext i16 %0 to i32
+  %add = add nsw i32 %conv1, %conv
+  %conv2 = trunc i32 %add to i16
+  store i16 %conv2, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_unsigned_short_sub_index(i16* nocapture %p, i32 %i, i16 zeroext %x) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
+  %conv = zext i16 %x to i32
+  %add.ptr = getelementptr inbounds i16* %p, i32 %i
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %conv1 = zext i16 %0 to i32
+  %sub = sub nsw i32 %conv1, %conv
+  %conv2 = trunc i32 %sub to i16
+  store i16 %conv2, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_unsigned_short_or_index(i16* nocapture %p, i32 %i, i16 zeroext %x) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i16* %p, i32 %i
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %or3 = or i16 %0, %x
+  store i16 %or3, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_unsigned_short_and_index(i16* nocapture %p, i32 %i, i16 zeroext %x) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i16* %p, i32 %i
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %and3 = and i16 %0, %x
+  store i16 %and3, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_unsigned_short_clrbit_index(i16* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+  %add.ptr = getelementptr inbounds i16* %p, i32 %i
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %conv = zext i16 %0 to i32
+  %and = and i32 %conv, 65503
+  %conv1 = trunc i32 %and to i16
+  store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_unsigned_short_setbit_index(i16* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+  %add.ptr = getelementptr inbounds i16* %p, i32 %i
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %conv = zext i16 %0 to i32
+  %or = or i32 %conv, 128
+  %conv1 = trunc i32 %or to i16
+  store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_unsigned_short_add5_index5(i16* nocapture %p) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}#5
+  %add.ptr = getelementptr inbounds i16* %p, i32 5
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %conv = zext i16 %0 to i32
+  %add = add nsw i32 %conv, 5
+  %conv1 = trunc i32 %add to i16
+  store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_unsigned_short_add_index5(i16* nocapture %p, i16 zeroext %x) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}r{{[0-9]+}}
+  %conv = zext i16 %x to i32
+  %add.ptr = getelementptr inbounds i16* %p, i32 5
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %conv1 = zext i16 %0 to i32
+  %add = add nsw i32 %conv1, %conv
+  %conv2 = trunc i32 %add to i16
+  store i16 %conv2, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_unsigned_short_sub_index5(i16* nocapture %p, i16 zeroext %x) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}r{{[0-9]+}}
+  %conv = zext i16 %x to i32
+  %add.ptr = getelementptr inbounds i16* %p, i32 5
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %conv1 = zext i16 %0 to i32
+  %sub = sub nsw i32 %conv1, %conv
+  %conv2 = trunc i32 %sub to i16
+  store i16 %conv2, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_unsigned_short_or_index5(i16* nocapture %p, i16 zeroext %x) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}|={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i16* %p, i32 5
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %or3 = or i16 %0, %x
+  store i16 %or3, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_unsigned_short_and_index5(i16* nocapture %p, i16 zeroext %x) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}&={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i16* %p, i32 5
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %and3 = and i16 %0, %x
+  store i16 %and3, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_unsigned_short_clrbit_index5(i16* nocapture %p) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+  %add.ptr = getelementptr inbounds i16* %p, i32 5
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %conv = zext i16 %0 to i32
+  %and = and i32 %conv, 65503
+  %conv1 = trunc i32 %and to i16
+  store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_unsigned_short_setbit_index5(i16* nocapture %p) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+  %add.ptr = getelementptr inbounds i16* %p, i32 5
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %conv = zext i16 %0 to i32
+  %or = or i32 %conv, 128
+  %conv1 = trunc i32 %or to i16
+  store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_signed_short_add5(i16* nocapture %p) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
+  %0 = load i16* %p, align 2, !tbaa !2
+  %conv2 = zext i16 %0 to i32
+  %add = add nsw i32 %conv2, 5
+  %conv1 = trunc i32 %add to i16
+  store i16 %conv1, i16* %p, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_signed_short_add(i16* nocapture %p, i16 signext %x) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
+  %conv4 = zext i16 %x to i32
+  %0 = load i16* %p, align 2, !tbaa !2
+  %conv13 = zext i16 %0 to i32
+  %add = add nsw i32 %conv13, %conv4
+  %conv2 = trunc i32 %add to i16
+  store i16 %conv2, i16* %p, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_signed_short_sub(i16* nocapture %p, i16 signext %x) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
+  %conv4 = zext i16 %x to i32
+  %0 = load i16* %p, align 2, !tbaa !2
+  %conv13 = zext i16 %0 to i32
+  %sub = sub nsw i32 %conv13, %conv4
+  %conv2 = trunc i32 %sub to i16
+  store i16 %conv2, i16* %p, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_signed_short_or(i16* nocapture %p, i16 signext %x) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
+  %0 = load i16* %p, align 2, !tbaa !2
+  %or3 = or i16 %0, %x
+  store i16 %or3, i16* %p, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_signed_short_and(i16* nocapture %p, i16 signext %x) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
+  %0 = load i16* %p, align 2, !tbaa !2
+  %and3 = and i16 %0, %x
+  store i16 %and3, i16* %p, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_signed_short_clrbit(i16* nocapture %p) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+  %0 = load i16* %p, align 2, !tbaa !2
+  %conv2 = zext i16 %0 to i32
+  %and = and i32 %conv2, 65503
+  %conv1 = trunc i32 %and to i16
+  store i16 %conv1, i16* %p, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_signed_short_setbit(i16* nocapture %p) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+  %0 = load i16* %p, align 2, !tbaa !2
+  %conv2 = zext i16 %0 to i32
+  %or = or i32 %conv2, 128
+  %conv1 = trunc i32 %or to i16
+  store i16 %conv1, i16* %p, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_signed_short_add5_index(i16* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
+  %add.ptr = getelementptr inbounds i16* %p, i32 %i
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %conv2 = zext i16 %0 to i32
+  %add = add nsw i32 %conv2, 5
+  %conv1 = trunc i32 %add to i16
+  store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_signed_short_add_index(i16* nocapture %p, i32 %i, i16 signext %x) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
+  %conv4 = zext i16 %x to i32
+  %add.ptr = getelementptr inbounds i16* %p, i32 %i
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %conv13 = zext i16 %0 to i32
+  %add = add nsw i32 %conv13, %conv4
+  %conv2 = trunc i32 %add to i16
+  store i16 %conv2, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_signed_short_sub_index(i16* nocapture %p, i32 %i, i16 signext %x) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
+  %conv4 = zext i16 %x to i32
+  %add.ptr = getelementptr inbounds i16* %p, i32 %i
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %conv13 = zext i16 %0 to i32
+  %sub = sub nsw i32 %conv13, %conv4
+  %conv2 = trunc i32 %sub to i16
+  store i16 %conv2, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_signed_short_or_index(i16* nocapture %p, i32 %i, i16 signext %x) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i16* %p, i32 %i
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %or3 = or i16 %0, %x
+  store i16 %or3, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_signed_short_and_index(i16* nocapture %p, i32 %i, i16 signext %x) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i16* %p, i32 %i
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %and3 = and i16 %0, %x
+  store i16 %and3, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_signed_short_clrbit_index(i16* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+  %add.ptr = getelementptr inbounds i16* %p, i32 %i
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %conv2 = zext i16 %0 to i32
+  %and = and i32 %conv2, 65503
+  %conv1 = trunc i32 %and to i16
+  store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_signed_short_setbit_index(i16* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+  %add.ptr = getelementptr inbounds i16* %p, i32 %i
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %conv2 = zext i16 %0 to i32
+  %or = or i32 %conv2, 128
+  %conv1 = trunc i32 %or to i16
+  store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_signed_short_add5_index5(i16* nocapture %p) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}#5
+  %add.ptr = getelementptr inbounds i16* %p, i32 5
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %conv2 = zext i16 %0 to i32
+  %add = add nsw i32 %conv2, 5
+  %conv1 = trunc i32 %add to i16
+  store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_signed_short_add_index5(i16* nocapture %p, i16 signext %x) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}r{{[0-9]+}}
+  %conv4 = zext i16 %x to i32
+  %add.ptr = getelementptr inbounds i16* %p, i32 5
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %conv13 = zext i16 %0 to i32
+  %add = add nsw i32 %conv13, %conv4
+  %conv2 = trunc i32 %add to i16
+  store i16 %conv2, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_signed_short_sub_index5(i16* nocapture %p, i16 signext %x) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}r{{[0-9]+}}
+  %conv4 = zext i16 %x to i32
+  %add.ptr = getelementptr inbounds i16* %p, i32 5
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %conv13 = zext i16 %0 to i32
+  %sub = sub nsw i32 %conv13, %conv4
+  %conv2 = trunc i32 %sub to i16
+  store i16 %conv2, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_signed_short_or_index5(i16* nocapture %p, i16 signext %x) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}|={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i16* %p, i32 5
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %or3 = or i16 %0, %x
+  store i16 %or3, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_signed_short_and_index5(i16* nocapture %p, i16 signext %x) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}&={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i16* %p, i32 5
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %and3 = and i16 %0, %x
+  store i16 %and3, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_signed_short_clrbit_index5(i16* nocapture %p) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+  %add.ptr = getelementptr inbounds i16* %p, i32 5
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %conv2 = zext i16 %0 to i32
+  %and = and i32 %conv2, 65503
+  %conv1 = trunc i32 %and to i16
+  store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_signed_short_setbit_index5(i16* nocapture %p) nounwind {
+entry:
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+  %add.ptr = getelementptr inbounds i16* %p, i32 5
+  %0 = load i16* %add.ptr, align 2, !tbaa !2
+  %conv2 = zext i16 %0 to i32
+  %or = or i32 %conv2, 128
+  %conv1 = trunc i32 %or to i16
+  store i16 %conv1, i16* %add.ptr, align 2, !tbaa !2
+  ret void
+}
+
+define void @memop_signed_int_add5(i32* nocapture %p) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
+  %0 = load i32* %p, align 4, !tbaa !3
+  %add = add i32 %0, 5
+  store i32 %add, i32* %p, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_signed_int_add(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
+  %0 = load i32* %p, align 4, !tbaa !3
+  %add = add i32 %0, %x
+  store i32 %add, i32* %p, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_signed_int_sub(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
+  %0 = load i32* %p, align 4, !tbaa !3
+  %sub = sub i32 %0, %x
+  store i32 %sub, i32* %p, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_signed_int_or(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
+  %0 = load i32* %p, align 4, !tbaa !3
+  %or = or i32 %0, %x
+  store i32 %or, i32* %p, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_signed_int_and(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
+  %0 = load i32* %p, align 4, !tbaa !3
+  %and = and i32 %0, %x
+  store i32 %and, i32* %p, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_signed_int_clrbit(i32* nocapture %p) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+  %0 = load i32* %p, align 4, !tbaa !3
+  %and = and i32 %0, -33
+  store i32 %and, i32* %p, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_signed_int_setbit(i32* nocapture %p) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+  %0 = load i32* %p, align 4, !tbaa !3
+  %or = or i32 %0, 128
+  store i32 %or, i32* %p, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_signed_int_add5_index(i32* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
+  %add.ptr = getelementptr inbounds i32* %p, i32 %i
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %add = add i32 %0, 5
+  store i32 %add, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_signed_int_add_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i32* %p, i32 %i
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %add = add i32 %0, %x
+  store i32 %add, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_signed_int_sub_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i32* %p, i32 %i
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %sub = sub i32 %0, %x
+  store i32 %sub, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_signed_int_or_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i32* %p, i32 %i
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %or = or i32 %0, %x
+  store i32 %or, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_signed_int_and_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i32* %p, i32 %i
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %and = and i32 %0, %x
+  store i32 %and, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_signed_int_clrbit_index(i32* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+  %add.ptr = getelementptr inbounds i32* %p, i32 %i
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %and = and i32 %0, -33
+  store i32 %and, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_signed_int_setbit_index(i32* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+  %add.ptr = getelementptr inbounds i32* %p, i32 %i
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %or = or i32 %0, 128
+  store i32 %or, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_signed_int_add5_index5(i32* nocapture %p) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}#5
+  %add.ptr = getelementptr inbounds i32* %p, i32 5
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %add = add i32 %0, 5
+  store i32 %add, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_signed_int_add_index5(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i32* %p, i32 5
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %add = add i32 %0, %x
+  store i32 %add, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_signed_int_sub_index5(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i32* %p, i32 5
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %sub = sub i32 %0, %x
+  store i32 %sub, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_signed_int_or_index5(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}|={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i32* %p, i32 5
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %or = or i32 %0, %x
+  store i32 %or, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_signed_int_and_index5(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}&={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i32* %p, i32 5
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %and = and i32 %0, %x
+  store i32 %and, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_signed_int_clrbit_index5(i32* nocapture %p) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+  %add.ptr = getelementptr inbounds i32* %p, i32 5
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %and = and i32 %0, -33
+  store i32 %and, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_signed_int_setbit_index5(i32* nocapture %p) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+  %add.ptr = getelementptr inbounds i32* %p, i32 5
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %or = or i32 %0, 128
+  store i32 %or, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_unsigned_int_add5(i32* nocapture %p) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
+  %0 = load i32* %p, align 4, !tbaa !3
+  %add = add nsw i32 %0, 5
+  store i32 %add, i32* %p, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_unsigned_int_add(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
+  %0 = load i32* %p, align 4, !tbaa !3
+  %add = add nsw i32 %0, %x
+  store i32 %add, i32* %p, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_unsigned_int_sub(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
+  %0 = load i32* %p, align 4, !tbaa !3
+  %sub = sub nsw i32 %0, %x
+  store i32 %sub, i32* %p, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_unsigned_int_or(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
+  %0 = load i32* %p, align 4, !tbaa !3
+  %or = or i32 %0, %x
+  store i32 %or, i32* %p, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_unsigned_int_and(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
+  %0 = load i32* %p, align 4, !tbaa !3
+  %and = and i32 %0, %x
+  store i32 %and, i32* %p, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_unsigned_int_clrbit(i32* nocapture %p) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+  %0 = load i32* %p, align 4, !tbaa !3
+  %and = and i32 %0, -33
+  store i32 %and, i32* %p, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_unsigned_int_setbit(i32* nocapture %p) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+  %0 = load i32* %p, align 4, !tbaa !3
+  %or = or i32 %0, 128
+  store i32 %or, i32* %p, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_unsigned_int_add5_index(i32* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
+  %add.ptr = getelementptr inbounds i32* %p, i32 %i
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %add = add nsw i32 %0, 5
+  store i32 %add, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_unsigned_int_add_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i32* %p, i32 %i
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %add = add nsw i32 %0, %x
+  store i32 %add, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_unsigned_int_sub_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i32* %p, i32 %i
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %sub = sub nsw i32 %0, %x
+  store i32 %sub, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_unsigned_int_or_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i32* %p, i32 %i
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %or = or i32 %0, %x
+  store i32 %or, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_unsigned_int_and_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i32* %p, i32 %i
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %and = and i32 %0, %x
+  store i32 %and, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_unsigned_int_clrbit_index(i32* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+  %add.ptr = getelementptr inbounds i32* %p, i32 %i
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %and = and i32 %0, -33
+  store i32 %and, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_unsigned_int_setbit_index(i32* nocapture %p, i32 %i) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+  %add.ptr = getelementptr inbounds i32* %p, i32 %i
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %or = or i32 %0, 128
+  store i32 %or, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_unsigned_int_add5_index5(i32* nocapture %p) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}#5
+  %add.ptr = getelementptr inbounds i32* %p, i32 5
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %add = add nsw i32 %0, 5
+  store i32 %add, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_unsigned_int_add_index5(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i32* %p, i32 5
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %add = add nsw i32 %0, %x
+  store i32 %add, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_unsigned_int_sub_index5(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i32* %p, i32 5
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %sub = sub nsw i32 %0, %x
+  store i32 %sub, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_unsigned_int_or_index5(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}|={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i32* %p, i32 5
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %or = or i32 %0, %x
+  store i32 %or, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_unsigned_int_and_index5(i32* nocapture %p, i32 %x) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}&={{ *}}r{{[0-9]+}}
+  %add.ptr = getelementptr inbounds i32* %p, i32 5
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %and = and i32 %0, %x
+  store i32 %and, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_unsigned_int_clrbit_index5(i32* nocapture %p) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
+  %add.ptr = getelementptr inbounds i32* %p, i32 5
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %and = and i32 %0, -33
+  store i32 %and, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+define void @memop_unsigned_int_setbit_index5(i32* nocapture %p) nounwind {
+entry:
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
+  %add.ptr = getelementptr inbounds i32* %p, i32 5
+  %0 = load i32* %add.ptr, align 4, !tbaa !3
+  %or = or i32 %0, 128
+  store i32 %or, i32* %add.ptr, align 4, !tbaa !3
+  ret void
+}
+
+!0 = metadata !{metadata !"omnipotent char", metadata !1}
+!1 = metadata !{metadata !"Simple C/C++ TBAA"}
+!2 = metadata !{metadata !"short", metadata !0}
+!3 = metadata !{metadata !"int", metadata !0}

Added: llvm/trunk/test/CodeGen/Hexagon/memops1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/memops1.ll?rev=177747&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/memops1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/memops1.ll Fri Mar 22 13:41:34 2013
@@ -0,0 +1,33 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv5  < %s | FileCheck %s
+; Generate MemOps for V4 and above.
+
+
+define void @f(i32* %p) nounwind {
+entry:
+; Unoptimized-style IR: %p is routed through a stack slot (alloca + store +
+; reload) before the decrement of p[10] (byte offset 10 * 4 = #40). The
+; expected output is still a single memop: memw(Rs+#40) -= #1.
+; CHECK:  memw(r{{[0-9]+}}{{ *}}+{{ *}}#40){{ *}}-={{ *}}#1
+  %p.addr = alloca i32*, align 4
+  store i32* %p, i32** %p.addr, align 4
+  %0 = load i32** %p.addr, align 4
+  %add.ptr = getelementptr inbounds i32* %0, i32 10
+  %1 = load i32* %add.ptr, align 4
+  %sub = sub nsw i32 %1, 1
+  store i32 %sub, i32* %add.ptr, align 4
+  ret void
+}
+
+define void @g(i32* %p, i32 %i) nounwind {
+entry:
+; Variable base: the address is p + %i elements + 10 elements, both operands
+; coming from stack slots. The constant part (10 * 4 = #40) should fold into
+; the memop offset against the register holding p + i*4:
+; memw(Rs+#40) -= #1.
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#40){{ *}}-={{ *}}#1
+  %p.addr = alloca i32*, align 4
+  %i.addr = alloca i32, align 4
+  store i32* %p, i32** %p.addr, align 4
+  store i32 %i, i32* %i.addr, align 4
+  %0 = load i32** %p.addr, align 4
+  %1 = load i32* %i.addr, align 4
+  %add.ptr = getelementptr inbounds i32* %0, i32 %1
+  %add.ptr1 = getelementptr inbounds i32* %add.ptr, i32 10
+  %2 = load i32* %add.ptr1, align 4
+  %sub = sub nsw i32 %2, 1
+  store i32 %sub, i32* %add.ptr1, align 4
+  ret void
+}

Added: llvm/trunk/test/CodeGen/Hexagon/memops2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/memops2.ll?rev=177747&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/memops2.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/memops2.ll Fri Mar 22 13:41:34 2013
@@ -0,0 +1,32 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv5  < %s | FileCheck %s
+; Generate MemOps for V4 and above.
+
+
+define void @f(i16* nocapture %p) nounwind {
+entry:
+; Halfword decrement expressed as zext / add 65535 / trunc: adding 0xFFFF
+; modulo 2^16 is the same as subtracting 1. p[10] is at byte offset
+; 10 * 2 = #20, so this should become: memh(Rs+#20) -= #1.
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}#1
+  %add.ptr = getelementptr inbounds i16* %p, i32 10
+  %0 = load i16* %add.ptr, align 2, !tbaa !0
+  %conv2 = zext i16 %0 to i32
+  %sub = add nsw i32 %conv2, 65535
+  %conv1 = trunc i32 %sub to i16
+  store i16 %conv1, i16* %add.ptr, align 2, !tbaa !0
+  ret void
+}
+
+define void @g(i16* nocapture %p, i32 %i) nounwind {
+entry:
+; Same halfword decrement (add 65535 + trunc == subtract 1), but with a
+; variable element index %i + 10. The constant part (10 * 2 = #20) should
+; fold into the memop offset: memh(Rs+#20) -= #1.
+; CHECK:  memh(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}#1
+  %add.ptr.sum = add i32 %i, 10
+  %add.ptr1 = getelementptr inbounds i16* %p, i32 %add.ptr.sum
+  %0 = load i16* %add.ptr1, align 2, !tbaa !0
+  %conv3 = zext i16 %0 to i32
+  %sub = add nsw i32 %conv3, 65535
+  %conv2 = trunc i32 %sub to i16
+  store i16 %conv2, i16* %add.ptr1, align 2, !tbaa !0
+  ret void
+}
+
+!0 = metadata !{metadata !"short", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/memops3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/memops3.ll?rev=177747&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/memops3.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/memops3.ll Fri Mar 22 13:41:34 2013
@@ -0,0 +1,31 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv5  < %s | FileCheck %s
+; Generate MemOps for V4 and above.
+
+
+define void @f(i8* nocapture %p) nounwind {
+entry:
+; Byte decrement expressed as zext / add 255 / trunc: adding 0xFF modulo 2^8
+; is the same as subtracting 1. p[10] is at byte offset #10, so this should
+; become: memb(Rs+#10) -= #1.
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}#1
+  %add.ptr = getelementptr inbounds i8* %p, i32 10
+  %0 = load i8* %add.ptr, align 1, !tbaa !0
+  %conv = zext i8 %0 to i32
+  %sub = add nsw i32 %conv, 255
+  %conv1 = trunc i32 %sub to i8
+  store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+  ret void
+}
+
+define void @g(i8* nocapture %p, i32 %i) nounwind {
+entry:
+; Same byte decrement (add 255 + trunc == subtract 1), but with a variable
+; index %i + 10. The constant part (#10, element size 1) should fold into
+; the memop offset: memb(Rs+#10) -= #1.
+; CHECK:  memb(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}#1
+  %add.ptr.sum = add i32 %i, 10
+  %add.ptr1 = getelementptr inbounds i8* %p, i32 %add.ptr.sum
+  %0 = load i8* %add.ptr1, align 1, !tbaa !0
+  %conv = zext i8 %0 to i32
+  %sub = add nsw i32 %conv, 255
+  %conv2 = trunc i32 %sub to i8
+  store i8 %conv2, i8* %add.ptr1, align 1, !tbaa !0
+  ret void
+}
+
+!0 = metadata !{metadata !"omnipotent char", metadata !1}
+!1 = metadata !{metadata !"Simple C/C++ TBAA"}





More information about the llvm-commits mailing list