[llvm] 08ebd8c - [VE] aligned load/store isel patterns

Simon Moll via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 24 06:17:29 PST 2020


Author: Kazushi (Jam) Marukawa
Date: 2020-01-24T15:16:54+01:00
New Revision: 08ebd8c79e4b961d57da6c7e06b27db017327e8c

URL: https://github.com/llvm/llvm-project/commit/08ebd8c79e4b961d57da6c7e06b27db017327e8c
DIFF: https://github.com/llvm/llvm-project/commit/08ebd8c79e4b961d57da6c7e06b27db017327e8c.diff

LOG: [VE] aligned load/store isel patterns

Summary:
Aligned load/store isel patterns and tests for
i1/i8/i16/i32/i64 (including extension and truncation) and fp32/fp64.

Reviewed By: arsenm

Differential Revision: https://reviews.llvm.org/D73276

Added: 
    llvm/test/CodeGen/VE/load.ll
    llvm/test/CodeGen/VE/store.ll

Modified: 
    llvm/lib/Target/VE/VEISelDAGToDAG.cpp
    llvm/lib/Target/VE/VEInstrInfo.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/VE/VEISelDAGToDAG.cpp b/llvm/lib/Target/VE/VEISelDAGToDAG.cpp
index 43030993efb9..236611d34dc0 100644
--- a/llvm/lib/Target/VE/VEISelDAGToDAG.cpp
+++ b/llvm/lib/Target/VE/VEISelDAGToDAG.cpp
@@ -43,6 +43,9 @@ class VEDAGToDAGISel : public SelectionDAGISel {
 
   void Select(SDNode *N) override;
 
+  // Complex Pattern Selectors.
+  bool SelectADDRri(SDValue N, SDValue &Base, SDValue &Offset);
+
   StringRef getPassName() const override {
     return "VE DAG->DAG Pattern Instruction Selection";
   }
@@ -52,6 +55,39 @@ class VEDAGToDAGISel : public SelectionDAGISel {
 };
 } // end anonymous namespace
 
+bool VEDAGToDAGISel::SelectADDRri(SDValue Addr, SDValue &Base,
+                                  SDValue &Offset) {
+  auto AddrTy = Addr->getValueType(0);
+  if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+    Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), AddrTy);
+    Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
+    return true;
+  }
+  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+      Addr.getOpcode() == ISD::TargetGlobalAddress ||
+      Addr.getOpcode() == ISD::TargetGlobalTLSAddress)
+    return false; // direct calls.
+
+  if (CurDAG->isBaseWithConstantOffset(Addr)) {
+    ConstantSDNode *CN = cast<ConstantSDNode>(Addr.getOperand(1));
+    if (isInt<13>(CN->getSExtValue())) {
+      if (FrameIndexSDNode *FIN =
+              dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
+        // Constant offset from frame ref.
+        Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), AddrTy);
+      } else {
+        Base = Addr.getOperand(0);
+      }
+      Offset =
+          CurDAG->getTargetConstant(CN->getZExtValue(), SDLoc(Addr), MVT::i32);
+      return true;
+    }
+  }
+  Base = Addr;
+  Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
+  return true;
+}
+
 void VEDAGToDAGISel::Select(SDNode *N) {
   SDLoc dl(N);
   if (N->isMachineOpcode()) {

diff --git a/llvm/lib/Target/VE/VEInstrInfo.td b/llvm/lib/Target/VE/VEInstrInfo.td
index 7203a8e32418..412d73f461a8 100644
--- a/llvm/lib/Target/VE/VEInstrInfo.td
+++ b/llvm/lib/Target/VE/VEInstrInfo.td
@@ -130,6 +130,9 @@ def fcond2cc : SDNodeXForm<cond, [{
   return CurDAG->getTargetConstant(cc, SDLoc(N), MVT::i32);
 }]>;
 
+// Addressing modes.
+def ADDRri : ComplexPattern<iPTR, 2, "SelectADDRri", [frameindex], []>;
+
 // ASX format of memory address
 def MEMri : Operand<iPTR> {
   let PrintMethod = "printMemASXOperand";
@@ -675,7 +678,43 @@ let cy = 0, sy = 0, cz = 1 in {
 let cx = 0 in
 def LDSri : RM<
     0x01, (outs I64:$sx), (ins MEMri:$addr),
-    "ld $sx, $addr">;
+    "ld $sx, $addr",
+    [(set i64:$sx, (load ADDRri:$addr))]>;
+let cx = 0 in
+def LDUri : RM<
+    0x02, (outs F32:$sx), (ins MEMri:$addr),
+    "ldu $sx, $addr",
+    [(set f32:$sx, (load ADDRri:$addr))]>;
+let cx = 0 in
+def LDLri : RM<
+    0x03, (outs I32:$sx), (ins MEMri:$addr),
+    "ldl.sx $sx, $addr",
+    [(set i32:$sx, (load ADDRri:$addr))]>;
+let cx = 1 in
+def LDLUri : RM<
+    0x03, (outs I32:$sx), (ins MEMri:$addr),
+    "ldl.zx $sx, $addr",
+    [(set i32:$sx, (load ADDRri:$addr))]>;
+let cx = 0 in
+def LD2Bri : RM<
+    0x04, (outs I32:$sx), (ins MEMri:$addr),
+    "ld2b.sx $sx, $addr",
+    [(set i32:$sx, (sextloadi16 ADDRri:$addr))]>;
+let cx = 1 in
+def LD2BUri : RM<
+    0x04, (outs I32:$sx), (ins MEMri:$addr),
+    "ld2b.zx $sx, $addr",
+    [(set i32:$sx, (zextloadi16 ADDRri:$addr))]>;
+let cx = 0 in
+def LD1Bri : RM<
+    0x05, (outs I32:$sx), (ins MEMri:$addr),
+    "ld1b.sx $sx, $addr",
+    [(set i32:$sx, (sextloadi8 ADDRri:$addr))]>;
+let cx = 1 in
+def LD1BUri : RM<
+    0x05, (outs I32:$sx), (ins MEMri:$addr),
+    "ld1b.zx $sx, $addr",
+    [(set i32:$sx, (zextloadi8 ADDRri:$addr))]>;
 }
 }
 
@@ -683,10 +722,30 @@ let mayStore = 1, hasSideEffects = 0 in {
 let cx = 0, cy = 0, sy = 0, cz = 1 in {
 def STSri : RM<
     0x11, (outs), (ins MEMri:$addr, I64:$sx),
-    "st $sx, $addr">;
+    "st $sx, $addr",
+    [(store i64:$sx, ADDRri:$addr)]>;
+def STUri : RM<
+    0x12, (outs), (ins MEMri:$addr, F32:$sx),
+    "stu $sx, $addr",
+    [(store f32:$sx, ADDRri:$addr)]>;
+def STLri : RM<
+    0x13, (outs), (ins MEMri:$addr, I32:$sx),
+    "stl $sx, $addr",
+    [(store i32:$sx, ADDRri:$addr)]>;
+def ST2Bri : RM<
+    0x14, (outs), (ins MEMri:$addr, I32:$sx),
+    "st2b $sx, $addr",
+    [(truncstorei16 i32:$sx, ADDRri:$addr)]>;
+def ST1Bri : RM<
+    0x15, (outs), (ins MEMri:$addr, I32:$sx),
+    "st1b $sx, $addr",
+    [(truncstorei8 i32:$sx, ADDRri:$addr)]>;
 }
 }
 
+def : Pat<(f64 (load ADDRri:$addr)), (LDSri ADDRri:$addr)>;
+def : Pat<(store f64:$sx, ADDRri:$addr), (STSri ADDRri:$addr, $sx)>;
+
 // Return instruction is also a special case of jump.
 let cx = 0, cx2 = 0, bpf = 0 /* NONE */, cf = 15 /* AT */, cy = 0, sy = 0,
     cz = 1, sz = 0x10 /* SX10 */, imm32 = 0, Uses = [SX10],
@@ -792,6 +851,37 @@ def : Pat<(i64 (anyext i32:$sy)),
           (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $sy, sub_i32)>;
 
 
+// extload, sextload and zextload stuff
+def : Pat<(i64 (sextloadi8 ADDRri:$addr)),
+          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD1Bri MEMri:$addr), sub_i32)>;
+def : Pat<(i64 (zextloadi8 ADDRri:$addr)),
+          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD1BUri MEMri:$addr), sub_i32)>;
+def : Pat<(i64 (sextloadi16 ADDRri:$addr)),
+          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD2Bri MEMri:$addr), sub_i32)>;
+def : Pat<(i64 (zextloadi16 ADDRri:$addr)),
+          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD2BUri MEMri:$addr), sub_i32)>;
+def : Pat<(i64 (sextloadi32 ADDRri:$addr)),
+          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LDLri MEMri:$addr), sub_i32)>;
+def : Pat<(i64 (zextloadi32 ADDRri:$addr)),
+          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LDLUri MEMri:$addr), sub_i32)>;
+def : Pat<(i64 (extloadi8 ADDRri:$addr)),
+          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD1BUri MEMri:$addr), sub_i32)>;
+def : Pat<(i64 (extloadi16 ADDRri:$addr)),
+          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD2BUri MEMri:$addr), sub_i32)>;
+def : Pat<(i64 (extloadi32 ADDRri:$addr)),
+          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LDLri MEMri:$addr), sub_i32)>;
+
+// anyextload
+def : Pat<(extloadi8  ADDRri:$addr), (LD1BUri MEMri:$addr)>;
+def : Pat<(extloadi16 ADDRri:$addr), (LD2BUri MEMri:$addr)>;
+
+// truncstore
+def : Pat<(truncstorei8 i64:$src, ADDRri:$addr),
+          (ST1Bri MEMri:$addr, (EXTRACT_SUBREG $src, sub_i32))>;
+def : Pat<(truncstorei16 i64:$src, ADDRri:$addr),
+          (ST2Bri MEMri:$addr, (EXTRACT_SUBREG $src, sub_i32))>;
+def : Pat<(truncstorei32 i64:$src, ADDRri:$addr),
+          (STLri MEMri:$addr, (EXTRACT_SUBREG $src, sub_i32))>;
 
 //===----------------------------------------------------------------------===//
 // Pseudo Instructions

diff --git a/llvm/test/CodeGen/VE/load.ll b/llvm/test/CodeGen/VE/load.ll
new file mode 100644
index 000000000000..6532a8e0e0ad
--- /dev/null
+++ b/llvm/test/CodeGen/VE/load.ll
@@ -0,0 +1,194 @@
+; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s
+
+; Function Attrs: norecurse nounwind readonly
+define double @loadf64(double* nocapture readonly %0) {
+; CHECK-LABEL: loadf64:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    ld %s0, (,%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = load double, double* %0, align 16
+  ret double %2
+}
+
+; Function Attrs: norecurse nounwind readonly
+define float @loadf32(float* nocapture readonly %0) {
+; CHECK-LABEL: loadf32:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    ldu %s0, (,%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = load float, float* %0, align 16
+  ret float %2
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i64 @loadi64(i64* nocapture readonly %0) {
+; CHECK-LABEL: loadi64:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    ld %s0, (,%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = load i64, i64* %0, align 16
+  ret i64 %2
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i32 @loadi32(i32* nocapture readonly %0) {
+; CHECK-LABEL: loadi32:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    ldl.sx %s0, (,%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = load i32, i32* %0, align 16
+  ret i32 %2
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i64 @loadi32sext(i32* nocapture readonly %0) {
+; CHECK-LABEL: loadi32sext:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    ldl.sx %s0, (,%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = load i32, i32* %0, align 16
+  %3 = sext i32 %2 to i64
+  ret i64 %3
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i64 @loadi32zext(i32* nocapture readonly %0) {
+; CHECK-LABEL: loadi32zext:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    ldl.zx %s0, (,%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = load i32, i32* %0, align 16
+  %3 = zext i32 %2 to i64
+  ret i64 %3
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i16 @loadi16(i16* nocapture readonly %0) {
+; CHECK-LABEL: loadi16:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    ld2b.zx %s0, (,%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = load i16, i16* %0, align 16
+  ret i16 %2
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i64 @loadi16sext(i16* nocapture readonly %0) {
+; CHECK-LABEL: loadi16sext:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    ld2b.sx %s0, (,%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = load i16, i16* %0, align 16
+  %3 = sext i16 %2 to i64
+  ret i64 %3
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i64 @loadi16zext(i16* nocapture readonly %0) {
+; CHECK-LABEL: loadi16zext:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    ld2b.zx %s0, (,%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = load i16, i16* %0, align 16
+  %3 = zext i16 %2 to i64
+  ret i64 %3
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i8 @loadi8(i8* nocapture readonly %0) {
+; CHECK-LABEL: loadi8:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    ld1b.zx %s0, (,%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = load i8, i8* %0, align 16
+  ret i8 %2
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i64 @loadi8sext(i8* nocapture readonly %0) {
+; CHECK-LABEL: loadi8sext:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    ld1b.sx %s0, (,%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = load i8, i8* %0, align 16
+  %3 = sext i8 %2 to i64
+  ret i64 %3
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i64 @loadi8zext(i8* nocapture readonly %0) {
+; CHECK-LABEL: loadi8zext:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    ld1b.zx %s0, (,%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = load i8, i8* %0, align 16
+  %3 = zext i8 %2 to i64
+  ret i64 %3
+}
+
+; Function Attrs: norecurse nounwind readonly
+define double @loadf64stk() {
+; CHECK-LABEL: loadf64stk:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    ld %s0, 176(,%s11)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %addr = alloca double, align 16
+  %1 = load double, double* %addr, align 16
+  ret double %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define float @loadf32stk() {
+; CHECK-LABEL: loadf32stk:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    ldu %s0, 176(,%s11)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %addr = alloca float, align 16
+  %1 = load float, float* %addr, align 16
+  ret float %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i64 @loadi64stk() {
+; CHECK-LABEL: loadi64stk:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    ld %s0, 176(,%s11)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %addr = alloca i64, align 16
+  %1 = load i64, i64* %addr, align 16
+  ret i64 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i32 @loadi32stk() {
+; CHECK-LABEL: loadi32stk:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    ldl.sx %s0, 176(,%s11)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %addr = alloca i32, align 16
+  %1 = load i32, i32* %addr, align 16
+  ret i32 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i16 @loadi16stk() {
+; CHECK-LABEL: loadi16stk:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    ld2b.zx %s0, 176(,%s11)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %addr = alloca i16, align 16
+  %1 = load i16, i16* %addr, align 16
+  ret i16 %1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define i8 @loadi8stk() {
+; CHECK-LABEL: loadi8stk:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    ld1b.zx %s0, 176(,%s11)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %addr = alloca i8, align 16
+  %1 = load i8, i8* %addr, align 16
+  ret i8 %1
+}
+

diff --git a/llvm/test/CodeGen/VE/store.ll b/llvm/test/CodeGen/VE/store.ll
new file mode 100644
index 000000000000..984d2cb4df92
--- /dev/null
+++ b/llvm/test/CodeGen/VE/store.ll
@@ -0,0 +1,160 @@
+; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s
+
+; Function Attrs: norecurse nounwind readonly
+define void @storef64(double* nocapture %0, double %1) {
+; CHECK-LABEL: storef64:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    st %s1, (,%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  store double %1, double* %0, align 16
+  ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storef32(float* nocapture %0, float %1) {
+; CHECK-LABEL: storef32:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    stu %s1, (,%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  store float %1, float* %0, align 16
+  ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei64(i64* nocapture %0, i64 %1) {
+; CHECK-LABEL: storei64:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    st %s1, (,%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  store i64 %1, i64* %0, align 16
+  ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei32(i32* nocapture %0, i32 %1) {
+; CHECK-LABEL: storei32:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    stl %s1, (,%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  store i32 %1, i32* %0, align 16
+  ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei32tr(i32* nocapture %0, i64 %1) {
+; CHECK-LABEL: storei32tr:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    stl %s1, (,%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = trunc i64 %1 to i32
+  store i32 %3, i32* %0, align 16
+  ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei16(i16* nocapture %0, i16 %1) {
+; CHECK-LABEL: storei16:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    st2b %s1, (,%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  store i16 %1, i16* %0, align 16
+  ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei16tr(i16* nocapture %0, i64 %1) {
+; CHECK-LABEL: storei16tr:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    st2b %s1, (,%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = trunc i64 %1 to i16
+  store i16 %3, i16* %0, align 16
+  ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei8(i8* nocapture %0, i8 %1) {
+; CHECK-LABEL: storei8:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    st1b %s1, (,%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  store i8 %1, i8* %0, align 16
+  ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei8tr(i8* nocapture %0, i64 %1) {
+; CHECK-LABEL: storei8tr:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    st1b %s1, (,%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %3 = trunc i64 %1 to i8
+  store i8 %3, i8* %0, align 16
+  ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storef64stk(double %0) {
+; CHECK-LABEL: storef64stk:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    st %s0, 176(,%s11)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %addr = alloca double, align 16
+  store double %0, double* %addr, align 16
+  ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storef32stk(float %0) {
+; CHECK-LABEL: storef32stk:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    stu %s0, 176(,%s11)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %addr = alloca float, align 16
+  store float %0, float* %addr, align 16
+  ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei64stk(i64 %0) {
+; CHECK-LABEL: storei64stk:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    st %s0, 176(,%s11)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %addr = alloca i64, align 16
+  store i64 %0, i64* %addr, align 16
+  ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei32stk(i32 %0) {
+; CHECK-LABEL: storei32stk:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    stl %s0, 176(,%s11)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %addr = alloca i32, align 16
+  store i32 %0, i32* %addr, align 16
+  ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei16stk(i16 %0) {
+; CHECK-LABEL: storei16stk:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    st2b %s0, 176(,%s11)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %addr = alloca i16, align 16
+  store i16 %0, i16* %addr, align 16
+  ret void
+}
+
+; Function Attrs: norecurse nounwind readonly
+define void @storei8stk(i8 %0) {
+; CHECK-LABEL: storei8stk:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    st1b %s0, 176(,%s11)
+; CHECK-NEXT:    or %s11, 0, %s9
+  %addr = alloca i8, align 16
+  store i8 %0, i8* %addr, align 16
+  ret void
+}


        


More information about the llvm-commits mailing list