[llvm] r191746 - [SystemZ] Add register zero extensions involving at least one high word

Richard Sandiford rsandifo at linux.vnet.ibm.com
Tue Oct 1 05:49:07 PDT 2013


Author: rsandifo
Date: Tue Oct  1 07:49:07 2013
New Revision: 191746

URL: http://llvm.org/viewvc/llvm-project?rev=191746&view=rev
Log:
[SystemZ] Add register zero extensions involving at least one high word

Added:
    llvm/trunk/test/CodeGen/SystemZ/int-conv-11.ll
Modified:
    llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.cpp
    llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.h
    llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.td
    llvm/trunk/test/CodeGen/SystemZ/asm-18.ll

Modified: llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.cpp?rev=191746&r1=191745&r2=191746&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.cpp Tue Oct  1 07:49:07 2013
@@ -102,6 +102,17 @@ void SystemZInstrInfo::expandRXYPseudo(M
   MI->setDesc(get(Opcode));
 }
 
+// MI is an RR-style pseudo instruction that zero-extends the low Size bits
+// of one GRX32 into another.  Replace it with LowOpcode if both operands
+// are low registers, otherwise use RISB[LH]G.
+void SystemZInstrInfo::expandZExtPseudo(MachineInstr *MI, unsigned LowOpcode,
+                                        unsigned Size) const {
+  emitGRX32Move(*MI->getParent(), MI, MI->getDebugLoc(),
+                MI->getOperand(0).getReg(), MI->getOperand(1).getReg(),
+                LowOpcode, Size, MI->getOperand(1).isKill());
+  MI->eraseFromParent();
+}
+
 // Emit a zero-extending move from 32-bit GPR SrcReg to 32-bit GPR
 // DestReg before MBBI in MBB.  Use LowLowOpcode when both DestReg and SrcReg
 // are low registers, otherwise use RISB[LH]G.  Size is the number of bits
@@ -814,6 +825,14 @@ SystemZInstrInfo::expandPostRAPseudo(Mac
     expandRXYPseudo(MI, SystemZ::LH, SystemZ::LHH);
     return true;
 
+  case SystemZ::LLCRMux:
+    expandZExtPseudo(MI, SystemZ::LLCR, 8);
+    return true;
+
+  case SystemZ::LLHRMux:
+    expandZExtPseudo(MI, SystemZ::LLHR, 16);
+    return true;
+
   case SystemZ::LLCMux:
     expandRXYPseudo(MI, SystemZ::LLC, SystemZ::LLCH);
     return true;
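
The pseudos carry no opcode choice of their own: expandZExtPseudo simply
forwards to emitGRX32Move, whose body is not part of this diff.  Its
behaviour is pinned down by the comment above and by the asm-18.ll CHECK
lines below, though: LLCR/LLHR when both operands are low registers,
otherwise a RISBHG or RISBLG whose immediates select the low Size bits and
zero the rest, with a rotate of 32 whenever the move crosses word halves.
As a self-contained model of that choice (plain C++ rather than LLVM code;
the register-name placeholders and the expandZExt wrapper are illustrative
only):

  #include <cstdio>
  #include <string>

  // Model of the post-RA expansion choice: given where the register
  // allocator put the two GRX32 operands, pick the instruction and
  // immediates.  The operand triples mirror the asm-18.ll CHECK lines.
  std::string expandZExt(bool DestIsHigh, bool SrcIsHigh, unsigned Size) {
    if (!DestIsHigh && !SrcIsHigh)
      return Size == 8 ? "llcr %dest, %src" : "llhr %dest, %src";
    // RISBHG writes the high word, RISBLG the low word.  An end position
    // of 128+31 also zeroes the unselected bits of that word.
    const char *Opcode = DestIsHigh ? "risbhg" : "risblg";
    unsigned Start = 32 - Size;                    // keep the low Size bits
    unsigned End = 128 + 31;                       // ...and zero the rest
    unsigned Rotate = DestIsHigh != SrcIsHigh ? 32 : 0;
    char Buf[64];
    std::snprintf(Buf, sizeof(Buf), "%s %%dest, %%src, %u, %u, %u",
                  Opcode, Start, End, Rotate);
    return Buf;
  }

  int main() {
    const bool Flags[] = {false, true};
    for (bool DestIsHigh : Flags)
      for (bool SrcIsHigh : Flags)
        std::printf("dest %s, src %s -> %s\n",
                    DestIsHigh ? "high" : "low", SrcIsHigh ? "high" : "low",
                    expandZExt(DestIsHigh, SrcIsHigh, 8).c_str());
  }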

Modified: llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.h?rev=191746&r1=191745&r2=191746&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.h (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.h Tue Oct  1 07:49:07 2013
@@ -118,6 +118,8 @@ class SystemZInstrInfo : public SystemZG
   void splitAdjDynAlloc(MachineBasicBlock::iterator MI) const;
   void expandRXYPseudo(MachineInstr *MI, unsigned LowOpcode,
                        unsigned HighOpcode) const;
+  void expandZExtPseudo(MachineInstr *MI, unsigned LowOpcode,
+                        unsigned Size) const;
   void emitGRX32Move(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                      DebugLoc DL, unsigned DestReg, unsigned SrcReg,
                      unsigned LowLowOpcode, unsigned Size, bool KillSrc) const;

Modified: llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.td?rev=191746&r1=191745&r2=191746&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.td (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.td Tue Oct  1 07:49:07 2013
@@ -440,8 +440,14 @@ let Defs = [CC], CCValues = 0xE, Compare
 
 // 32-bit extensions from registers.
 let neverHasSideEffects = 1 in {
-  def LLCR : UnaryRRE<"llc", 0xB994, zext8,  GR32, GR32>;
-  def LLHR : UnaryRRE<"llh", 0xB995, zext16, GR32, GR32>;
+  // Expands to LLCR or RISB[LH]G, depending on the choice of registers.
+  def LLCRMux : UnaryRRPseudo<"llc", zext8, GRX32, GRX32>,
+                Requires<[FeatureHighWord]>;
+  def LLCR    : UnaryRRE<"llc", 0xB994, zext8,  GR32, GR32>;
+  // Expands to LLHR or RISB[LH]G, depending on the choice of registers.
+  def LLHRMux : UnaryRRPseudo<"llh", zext16, GRX32, GRX32>,
+                Requires<[FeatureHighWord]>;
+  def LLHR    : UnaryRRE<"llh", 0xB995, zext16, GR32, GR32>;
 }
 
 // 64-bit extensions from registers.
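
The Mux definitions are created only for z196 and above (FeatureHighWord),
where the register allocator may place a 32-bit value in either half of a
64-bit GPR; without the feature, the plain GR32 instructions remain the
only choice.  Whatever the expansion, the value computed is fixed by the
zext8 and zext16 operators in the patterns.  A trivial standalone statement
of that contract (plain C++, illustrative names):

  #include <cassert>
  #include <cstdint>

  // What any expansion of LLCRMux/LLHRMux must compute: the low 8 or 16
  // bits of the source, zero-extended, regardless of which half of a
  // 64-bit GPR either operand was allocated to.
  uint32_t zext8(uint32_t Src)  { return Src & 0xff; }
  uint32_t zext16(uint32_t Src) { return Src & 0xffff; }

  int main() {
    assert(zext8(0xdeadbeef) == 0xef);
    assert(zext16(0xdeadbeef) == 0xbeef);
  }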

Modified: llvm/trunk/test/CodeGen/SystemZ/asm-18.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/asm-18.ll?rev=191746&r1=191745&r2=191746&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/asm-18.ll (original)
+++ llvm/trunk/test/CodeGen/SystemZ/asm-18.ll Tue Oct  1 07:49:07 2013
@@ -192,3 +192,41 @@ define void @f8(i16 *%ptr1, i16 *%ptr2)
   store i16 %trunc2, i16 *%ptr4
   ret void
 }
+
+; Test zero extensions from 8 bits between mixtures of high and low registers.
+define i32 @f9(i8 %val1, i8 %val2) {
+; CHECK-LABEL: f9:
+; CHECK-DAG: risbhg [[REG1:%r[0-5]]], %r2, 24, 159, 32
+; CHECK-DAG: llcr [[REG2:%r[0-5]]], %r3
+; CHECK: stepa [[REG1]], [[REG2]]
+; CHECK: risbhg [[REG3:%r[0-5]]], [[REG1]], 24, 159, 0
+; CHECK: stepb [[REG3]]
+; CHECK: risblg %r2, [[REG3]], 24, 159, 32
+; CHECK: br %r14
+  %ext1 = zext i8 %val1 to i32
+  %ext2 = zext i8 %val2 to i32
+  %val3 = call i8 asm sideeffect "stepa $0, $1", "=h,0,r"(i32 %ext1, i32 %ext2)
+  %ext3 = zext i8 %val3 to i32
+  %val4 = call i8 asm sideeffect "stepb $0", "=h,0"(i32 %ext3)
+  %ext4 = zext i8 %val4 to i32
+  ret i32 %ext4
+}
+
+; Test zero extensions from 16 bits between mixtures of high and low registers.
+define i32 @f10(i16 %val1, i16 %val2) {
+; CHECK-LABEL: f10:
+; CHECK-DAG: risbhg [[REG1:%r[0-5]]], %r2, 16, 159, 32
+; CHECK-DAG: llhr [[REG2:%r[0-5]]], %r3
+; CHECK: stepa [[REG1]], [[REG2]]
+; CHECK: risbhg [[REG3:%r[0-5]]], [[REG1]], 16, 159, 0
+; CHECK: stepb [[REG3]]
+; CHECK: risblg %r2, [[REG3]], 16, 159, 32
+; CHECK: br %r14
+  %ext1 = zext i16 %val1 to i32
+  %ext2 = zext i16 %val2 to i32
+  %val3 = call i16 asm sideeffect "stepa $0, $1", "=h,0,r"(i32 %ext1, i32 %ext2)
+  %ext3 = zext i16 %val3 to i32
+  %val4 = call i16 asm sideeffect "stepb $0", "=h,0"(i32 %ext3)
+  %ext4 = zext i16 %val4 to i32
+  ret i32 %ext4
+}
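
The immediates in these CHECK lines follow from the RISBHG/RISBLG
definition: rotate the 64-bit source left, then insert a selected bit range
into the high (RISBHG) or low (RISBLG) word of the destination, zeroing the
rest of that word when bit 128 is set in the end position.  Below is a
standalone model of the f9 extensions, ignoring the inline-asm steps (plain
C++; my paraphrase of the rotate-then-insert semantics, not LLVM code):

  #include <cassert>
  #include <cstdint>

  // risbhg/risblg model: rotate Src left by Rotate, then keep bits
  // Start..31 of the selected word (bit 0 = MSB, as in the ISA) and zero
  // the rest of that word; the other word of Dest is left untouched.
  uint64_t risbg32(uint64_t Dest, uint64_t Src, unsigned Start,
                   unsigned Rotate, bool HighWord) {
    uint64_t Rotated =
        Rotate ? (Src << Rotate) | (Src >> (64 - Rotate)) : Src;
    uint32_t Word = HighWord ? uint32_t(Rotated >> 32) : uint32_t(Rotated);
    uint32_t Kept = Word & ((1u << (32 - Start)) - 1);  // bits Start..31
    return HighWord ? (Dest & 0xffffffff) | (uint64_t(Kept) << 32)
                    : (Dest & 0xffffffff00000000) | Kept;
  }

  int main() {
    uint64_t R2 = 0x11111111222222ab;  // %val1 in the low word of %r2
    // risbhg [[REG1]], %r2, 24, 159, 32: low byte -> zext8 in high word.
    uint64_t Reg1 = risbg32(0, R2, 24, 32, true);
    assert(Reg1 >> 32 == 0xab);
    // risbhg [[REG3]], [[REG1]], 24, 159, 0: high word -> high word.
    uint64_t Reg3 = risbg32(0, Reg1, 24, 0, true);
    assert(Reg3 >> 32 == 0xab);
    // risblg %r2, [[REG3]], 24, 159, 32: high word -> zext8 in low word.
    uint64_t Ret = risbg32(0, Reg3, 24, 32, false);
    assert((Ret & 0xffffffff) == 0xab);
  }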

Added: llvm/trunk/test/CodeGen/SystemZ/int-conv-11.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-conv-11.ll?rev=191746&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-conv-11.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-conv-11.ll Tue Oct  1 07:49:07 2013
@@ -0,0 +1,350 @@
+; Test spills of zero extensions when high GR32s are available.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+
+; Test a case where we spill the source of at least one LLCRMux.  We want
+; to use LLC(H) if possible.
+define void @f1(i32 *%ptr) {
+; CHECK-LABEL: f1:
+; CHECK: llc{{h?}} {{%r[0-9]+}}, 16{{[37]}}(%r15)
+; CHECK: br %r14
+  %val0 = load volatile i32 *%ptr
+  %val1 = load volatile i32 *%ptr
+  %val2 = load volatile i32 *%ptr
+  %val3 = load volatile i32 *%ptr
+  %val4 = load volatile i32 *%ptr
+  %val5 = load volatile i32 *%ptr
+  %val6 = load volatile i32 *%ptr
+  %val7 = load volatile i32 *%ptr
+  %val8 = load volatile i32 *%ptr
+  %val9 = load volatile i32 *%ptr
+  %val10 = load volatile i32 *%ptr
+  %val11 = load volatile i32 *%ptr
+  %val12 = load volatile i32 *%ptr
+  %val13 = load volatile i32 *%ptr
+  %val14 = load volatile i32 *%ptr
+  %val15 = load volatile i32 *%ptr
+  %val16 = load volatile i32 *%ptr
+  %val17 = load volatile i32 *%ptr
+  %val18 = load volatile i32 *%ptr
+  %val19 = load volatile i32 *%ptr
+  %val20 = load volatile i32 *%ptr
+  %val21 = load volatile i32 *%ptr
+  %val22 = load volatile i32 *%ptr
+  %val23 = load volatile i32 *%ptr
+  %val24 = load volatile i32 *%ptr
+  %val25 = load volatile i32 *%ptr
+  %val26 = load volatile i32 *%ptr
+  %val27 = load volatile i32 *%ptr
+  %val28 = load volatile i32 *%ptr
+  %val29 = load volatile i32 *%ptr
+  %val30 = load volatile i32 *%ptr
+  %val31 = load volatile i32 *%ptr
+
+  %trunc0 = trunc i32 %val0 to i8
+  %trunc1 = trunc i32 %val1 to i8
+  %trunc2 = trunc i32 %val2 to i8
+  %trunc3 = trunc i32 %val3 to i8
+  %trunc4 = trunc i32 %val4 to i8
+  %trunc5 = trunc i32 %val5 to i8
+  %trunc6 = trunc i32 %val6 to i8
+  %trunc7 = trunc i32 %val7 to i8
+  %trunc8 = trunc i32 %val8 to i8
+  %trunc9 = trunc i32 %val9 to i8
+  %trunc10 = trunc i32 %val10 to i8
+  %trunc11 = trunc i32 %val11 to i8
+  %trunc12 = trunc i32 %val12 to i8
+  %trunc13 = trunc i32 %val13 to i8
+  %trunc14 = trunc i32 %val14 to i8
+  %trunc15 = trunc i32 %val15 to i8
+  %trunc16 = trunc i32 %val16 to i8
+  %trunc17 = trunc i32 %val17 to i8
+  %trunc18 = trunc i32 %val18 to i8
+  %trunc19 = trunc i32 %val19 to i8
+  %trunc20 = trunc i32 %val20 to i8
+  %trunc21 = trunc i32 %val21 to i8
+  %trunc22 = trunc i32 %val22 to i8
+  %trunc23 = trunc i32 %val23 to i8
+  %trunc24 = trunc i32 %val24 to i8
+  %trunc25 = trunc i32 %val25 to i8
+  %trunc26 = trunc i32 %val26 to i8
+  %trunc27 = trunc i32 %val27 to i8
+  %trunc28 = trunc i32 %val28 to i8
+  %trunc29 = trunc i32 %val29 to i8
+  %trunc30 = trunc i32 %val30 to i8
+  %trunc31 = trunc i32 %val31 to i8
+
+  %ext0 = zext i8 %trunc0 to i32
+  %ext1 = zext i8 %trunc1 to i32
+  %ext2 = zext i8 %trunc2 to i32
+  %ext3 = zext i8 %trunc3 to i32
+  %ext4 = zext i8 %trunc4 to i32
+  %ext5 = zext i8 %trunc5 to i32
+  %ext6 = zext i8 %trunc6 to i32
+  %ext7 = zext i8 %trunc7 to i32
+  %ext8 = zext i8 %trunc8 to i32
+  %ext9 = zext i8 %trunc9 to i32
+  %ext10 = zext i8 %trunc10 to i32
+  %ext11 = zext i8 %trunc11 to i32
+  %ext12 = zext i8 %trunc12 to i32
+  %ext13 = zext i8 %trunc13 to i32
+  %ext14 = zext i8 %trunc14 to i32
+  %ext15 = zext i8 %trunc15 to i32
+  %ext16 = zext i8 %trunc16 to i32
+  %ext17 = zext i8 %trunc17 to i32
+  %ext18 = zext i8 %trunc18 to i32
+  %ext19 = zext i8 %trunc19 to i32
+  %ext20 = zext i8 %trunc20 to i32
+  %ext21 = zext i8 %trunc21 to i32
+  %ext22 = zext i8 %trunc22 to i32
+  %ext23 = zext i8 %trunc23 to i32
+  %ext24 = zext i8 %trunc24 to i32
+  %ext25 = zext i8 %trunc25 to i32
+  %ext26 = zext i8 %trunc26 to i32
+  %ext27 = zext i8 %trunc27 to i32
+  %ext28 = zext i8 %trunc28 to i32
+  %ext29 = zext i8 %trunc29 to i32
+  %ext30 = zext i8 %trunc30 to i32
+  %ext31 = zext i8 %trunc31 to i32
+
+  store volatile i32 %val0, i32 *%ptr
+  store volatile i32 %val1, i32 *%ptr
+  store volatile i32 %val2, i32 *%ptr
+  store volatile i32 %val3, i32 *%ptr
+  store volatile i32 %val4, i32 *%ptr
+  store volatile i32 %val5, i32 *%ptr
+  store volatile i32 %val6, i32 *%ptr
+  store volatile i32 %val7, i32 *%ptr
+  store volatile i32 %val8, i32 *%ptr
+  store volatile i32 %val9, i32 *%ptr
+  store volatile i32 %val10, i32 *%ptr
+  store volatile i32 %val11, i32 *%ptr
+  store volatile i32 %val12, i32 *%ptr
+  store volatile i32 %val13, i32 *%ptr
+  store volatile i32 %val14, i32 *%ptr
+  store volatile i32 %val15, i32 *%ptr
+  store volatile i32 %val16, i32 *%ptr
+  store volatile i32 %val17, i32 *%ptr
+  store volatile i32 %val18, i32 *%ptr
+  store volatile i32 %val19, i32 *%ptr
+  store volatile i32 %val20, i32 *%ptr
+  store volatile i32 %val21, i32 *%ptr
+  store volatile i32 %val22, i32 *%ptr
+  store volatile i32 %val23, i32 *%ptr
+  store volatile i32 %val24, i32 *%ptr
+  store volatile i32 %val25, i32 *%ptr
+  store volatile i32 %val26, i32 *%ptr
+  store volatile i32 %val27, i32 *%ptr
+  store volatile i32 %val28, i32 *%ptr
+  store volatile i32 %val29, i32 *%ptr
+  store volatile i32 %val30, i32 *%ptr
+  store volatile i32 %val31, i32 *%ptr
+
+  store volatile i32 %ext0, i32 *%ptr
+  store volatile i32 %ext1, i32 *%ptr
+  store volatile i32 %ext2, i32 *%ptr
+  store volatile i32 %ext3, i32 *%ptr
+  store volatile i32 %ext4, i32 *%ptr
+  store volatile i32 %ext5, i32 *%ptr
+  store volatile i32 %ext6, i32 *%ptr
+  store volatile i32 %ext7, i32 *%ptr
+  store volatile i32 %ext8, i32 *%ptr
+  store volatile i32 %ext9, i32 *%ptr
+  store volatile i32 %ext10, i32 *%ptr
+  store volatile i32 %ext11, i32 *%ptr
+  store volatile i32 %ext12, i32 *%ptr
+  store volatile i32 %ext13, i32 *%ptr
+  store volatile i32 %ext14, i32 *%ptr
+  store volatile i32 %ext15, i32 *%ptr
+  store volatile i32 %ext16, i32 *%ptr
+  store volatile i32 %ext17, i32 *%ptr
+  store volatile i32 %ext18, i32 *%ptr
+  store volatile i32 %ext19, i32 *%ptr
+  store volatile i32 %ext20, i32 *%ptr
+  store volatile i32 %ext21, i32 *%ptr
+  store volatile i32 %ext22, i32 *%ptr
+  store volatile i32 %ext23, i32 *%ptr
+  store volatile i32 %ext24, i32 *%ptr
+  store volatile i32 %ext25, i32 *%ptr
+  store volatile i32 %ext26, i32 *%ptr
+  store volatile i32 %ext27, i32 *%ptr
+  store volatile i32 %ext28, i32 *%ptr
+  store volatile i32 %ext29, i32 *%ptr
+  store volatile i32 %ext30, i32 *%ptr
+  store volatile i32 %ext31, i32 *%ptr
+
+  ret void
+}
+
+; Same again with i16, which should use LLH(H).
+define void @f2(i32 *%ptr) {
+; CHECK-LABEL: f2:
+; CHECK: llh{{h?}} {{%r[0-9]+}}, 16{{[26]}}(%r15)
+; CHECK: br %r14
+  %val0 = load volatile i32 *%ptr
+  %val1 = load volatile i32 *%ptr
+  %val2 = load volatile i32 *%ptr
+  %val3 = load volatile i32 *%ptr
+  %val4 = load volatile i32 *%ptr
+  %val5 = load volatile i32 *%ptr
+  %val6 = load volatile i32 *%ptr
+  %val7 = load volatile i32 *%ptr
+  %val8 = load volatile i32 *%ptr
+  %val9 = load volatile i32 *%ptr
+  %val10 = load volatile i32 *%ptr
+  %val11 = load volatile i32 *%ptr
+  %val12 = load volatile i32 *%ptr
+  %val13 = load volatile i32 *%ptr
+  %val14 = load volatile i32 *%ptr
+  %val15 = load volatile i32 *%ptr
+  %val16 = load volatile i32 *%ptr
+  %val17 = load volatile i32 *%ptr
+  %val18 = load volatile i32 *%ptr
+  %val19 = load volatile i32 *%ptr
+  %val20 = load volatile i32 *%ptr
+  %val21 = load volatile i32 *%ptr
+  %val22 = load volatile i32 *%ptr
+  %val23 = load volatile i32 *%ptr
+  %val24 = load volatile i32 *%ptr
+  %val25 = load volatile i32 *%ptr
+  %val26 = load volatile i32 *%ptr
+  %val27 = load volatile i32 *%ptr
+  %val28 = load volatile i32 *%ptr
+  %val29 = load volatile i32 *%ptr
+  %val30 = load volatile i32 *%ptr
+  %val31 = load volatile i32 *%ptr
+
+  %trunc0 = trunc i32 %val0 to i16
+  %trunc1 = trunc i32 %val1 to i16
+  %trunc2 = trunc i32 %val2 to i16
+  %trunc3 = trunc i32 %val3 to i16
+  %trunc4 = trunc i32 %val4 to i16
+  %trunc5 = trunc i32 %val5 to i16
+  %trunc6 = trunc i32 %val6 to i16
+  %trunc7 = trunc i32 %val7 to i16
+  %trunc8 = trunc i32 %val8 to i16
+  %trunc9 = trunc i32 %val9 to i16
+  %trunc10 = trunc i32 %val10 to i16
+  %trunc11 = trunc i32 %val11 to i16
+  %trunc12 = trunc i32 %val12 to i16
+  %trunc13 = trunc i32 %val13 to i16
+  %trunc14 = trunc i32 %val14 to i16
+  %trunc15 = trunc i32 %val15 to i16
+  %trunc16 = trunc i32 %val16 to i16
+  %trunc17 = trunc i32 %val17 to i16
+  %trunc18 = trunc i32 %val18 to i16
+  %trunc19 = trunc i32 %val19 to i16
+  %trunc20 = trunc i32 %val20 to i16
+  %trunc21 = trunc i32 %val21 to i16
+  %trunc22 = trunc i32 %val22 to i16
+  %trunc23 = trunc i32 %val23 to i16
+  %trunc24 = trunc i32 %val24 to i16
+  %trunc25 = trunc i32 %val25 to i16
+  %trunc26 = trunc i32 %val26 to i16
+  %trunc27 = trunc i32 %val27 to i16
+  %trunc28 = trunc i32 %val28 to i16
+  %trunc29 = trunc i32 %val29 to i16
+  %trunc30 = trunc i32 %val30 to i16
+  %trunc31 = trunc i32 %val31 to i16
+
+  %ext0 = zext i16 %trunc0 to i32
+  %ext1 = zext i16 %trunc1 to i32
+  %ext2 = zext i16 %trunc2 to i32
+  %ext3 = zext i16 %trunc3 to i32
+  %ext4 = zext i16 %trunc4 to i32
+  %ext5 = zext i16 %trunc5 to i32
+  %ext6 = zext i16 %trunc6 to i32
+  %ext7 = zext i16 %trunc7 to i32
+  %ext8 = zext i16 %trunc8 to i32
+  %ext9 = zext i16 %trunc9 to i32
+  %ext10 = zext i16 %trunc10 to i32
+  %ext11 = zext i16 %trunc11 to i32
+  %ext12 = zext i16 %trunc12 to i32
+  %ext13 = zext i16 %trunc13 to i32
+  %ext14 = zext i16 %trunc14 to i32
+  %ext15 = zext i16 %trunc15 to i32
+  %ext16 = zext i16 %trunc16 to i32
+  %ext17 = zext i16 %trunc17 to i32
+  %ext18 = zext i16 %trunc18 to i32
+  %ext19 = zext i16 %trunc19 to i32
+  %ext20 = zext i16 %trunc20 to i32
+  %ext21 = zext i16 %trunc21 to i32
+  %ext22 = zext i16 %trunc22 to i32
+  %ext23 = zext i16 %trunc23 to i32
+  %ext24 = zext i16 %trunc24 to i32
+  %ext25 = zext i16 %trunc25 to i32
+  %ext26 = zext i16 %trunc26 to i32
+  %ext27 = zext i16 %trunc27 to i32
+  %ext28 = zext i16 %trunc28 to i32
+  %ext29 = zext i16 %trunc29 to i32
+  %ext30 = zext i16 %trunc30 to i32
+  %ext31 = zext i16 %trunc31 to i32
+
+  store volatile i32 %val0, i32 *%ptr
+  store volatile i32 %val1, i32 *%ptr
+  store volatile i32 %val2, i32 *%ptr
+  store volatile i32 %val3, i32 *%ptr
+  store volatile i32 %val4, i32 *%ptr
+  store volatile i32 %val5, i32 *%ptr
+  store volatile i32 %val6, i32 *%ptr
+  store volatile i32 %val7, i32 *%ptr
+  store volatile i32 %val8, i32 *%ptr
+  store volatile i32 %val9, i32 *%ptr
+  store volatile i32 %val10, i32 *%ptr
+  store volatile i32 %val11, i32 *%ptr
+  store volatile i32 %val12, i32 *%ptr
+  store volatile i32 %val13, i32 *%ptr
+  store volatile i32 %val14, i32 *%ptr
+  store volatile i32 %val15, i32 *%ptr
+  store volatile i32 %val16, i32 *%ptr
+  store volatile i32 %val17, i32 *%ptr
+  store volatile i32 %val18, i32 *%ptr
+  store volatile i32 %val19, i32 *%ptr
+  store volatile i32 %val20, i32 *%ptr
+  store volatile i32 %val21, i32 *%ptr
+  store volatile i32 %val22, i32 *%ptr
+  store volatile i32 %val23, i32 *%ptr
+  store volatile i32 %val24, i32 *%ptr
+  store volatile i32 %val25, i32 *%ptr
+  store volatile i32 %val26, i32 *%ptr
+  store volatile i32 %val27, i32 *%ptr
+  store volatile i32 %val28, i32 *%ptr
+  store volatile i32 %val29, i32 *%ptr
+  store volatile i32 %val30, i32 *%ptr
+  store volatile i32 %val31, i32 *%ptr
+
+  store volatile i32 %ext0, i32 *%ptr
+  store volatile i32 %ext1, i32 *%ptr
+  store volatile i32 %ext2, i32 *%ptr
+  store volatile i32 %ext3, i32 *%ptr
+  store volatile i32 %ext4, i32 *%ptr
+  store volatile i32 %ext5, i32 *%ptr
+  store volatile i32 %ext6, i32 *%ptr
+  store volatile i32 %ext7, i32 *%ptr
+  store volatile i32 %ext8, i32 *%ptr
+  store volatile i32 %ext9, i32 *%ptr
+  store volatile i32 %ext10, i32 *%ptr
+  store volatile i32 %ext11, i32 *%ptr
+  store volatile i32 %ext12, i32 *%ptr
+  store volatile i32 %ext13, i32 *%ptr
+  store volatile i32 %ext14, i32 *%ptr
+  store volatile i32 %ext15, i32 *%ptr
+  store volatile i32 %ext16, i32 *%ptr
+  store volatile i32 %ext17, i32 *%ptr
+  store volatile i32 %ext18, i32 *%ptr
+  store volatile i32 %ext19, i32 *%ptr
+  store volatile i32 %ext20, i32 *%ptr
+  store volatile i32 %ext21, i32 *%ptr
+  store volatile i32 %ext22, i32 *%ptr
+  store volatile i32 %ext23, i32 *%ptr
+  store volatile i32 %ext24, i32 *%ptr
+  store volatile i32 %ext25, i32 *%ptr
+  store volatile i32 %ext26, i32 *%ptr
+  store volatile i32 %ext27, i32 *%ptr
+  store volatile i32 %ext28, i32 *%ptr
+  store volatile i32 %ext29, i32 *%ptr
+  store volatile i32 %ext30, i32 *%ptr
+  store volatile i32 %ext31, i32 *%ptr
+
+  ret void
+}
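
Both functions keep 32 values live at once so that some zero-extension
sources are spilled; the CHECK lines then verify that the reload folds
into a single zero-extending load (LLC or LLCH, LLH or LLHH) rather than a
full-word reload followed by an extension.  The offsets in the patterns
reflect big-endian addressing: the byte form reads from the word slot's
offset plus 3 and the halfword form from plus 2, which is what the
16{{[37]}} and 16{{[26]}} regexes allow for.  A tiny model (plain C++; the
160/164 slot offsets are an assumption about this frame layout):

  #include <cassert>

  // Big-endian spill-slot addressing: a zero-extending reload reads only
  // the least significant bytes of the spilled 32-bit word.
  unsigned llcOffset(unsigned WordSlot) { return WordSlot + 3; }  // low byte
  unsigned llhOffset(unsigned WordSlot) { return WordSlot + 2; }  // low half

  int main() {
    assert(llcOffset(160) == 163 && llcOffset(164) == 167);
    assert(llhOffset(160) == 162 && llhOffset(164) == 166);
  }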
