[llvm] r363806 - [SystemZ] Support vector load/store alignment hints

Ulrich Weigand via llvm-commits llvm-commits@lists.llvm.org
Wed Jun 19 07:20:00 PDT 2019


Author: uweigand
Date: Wed Jun 19 07:20:00 2019
New Revision: 363806

URL: http://llvm.org/viewvc/llvm-project?rev=363806&view=rev
Log:
[SystemZ] Support vector load/store alignment hints

Vector load/store instructions support an optional alignment field
that the compiler can use to pass known alignment information to the
hardware.  When the field is set (and the information is correct),
some processor models can perform the memory access faster than they
otherwise would.

This patch adds support for alignment hints in the assembler and
disassembler, and fills in known alignment during codegen.
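
For illustration, a minimal sketch of how the known alignment is
encoded during codegen (see lowerAlignmentHint below) and of the
resulting assembler syntax; the register and address operands are
just examples taken from the updated tests:

    alignment >= 16 bytes        ->  hint 4:   vl %v24, 0(%r2), 4
    alignment >= 8 bytes         ->  hint 3:   vl %v24, 0(%r2), 3
    smaller or unknown alignment ->  no hint:  vl %v24, 0(%r2)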


Modified:
    llvm/trunk/lib/Target/SystemZ/SystemZAsmPrinter.cpp
    llvm/trunk/lib/Target/SystemZ/SystemZInstrFormats.td
    llvm/trunk/lib/Target/SystemZ/SystemZInstrVector.td
    llvm/trunk/lib/Target/SystemZ/SystemZScheduleZ13.td
    llvm/trunk/lib/Target/SystemZ/SystemZScheduleZ14.td
    llvm/trunk/test/CodeGen/SystemZ/frame-19.ll
    llvm/trunk/test/CodeGen/SystemZ/vec-move-02.ll
    llvm/trunk/test/CodeGen/SystemZ/vec-move-03.ll
    llvm/trunk/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
    llvm/trunk/test/MC/Disassembler/SystemZ/insns-z13.txt
    llvm/trunk/test/MC/SystemZ/insn-bad-z13.s
    llvm/trunk/test/MC/SystemZ/insn-good-z13.s

Modified: llvm/trunk/lib/Target/SystemZ/SystemZAsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZAsmPrinter.cpp?rev=363806&r1=363805&r2=363806&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZAsmPrinter.cpp (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZAsmPrinter.cpp Wed Jun 19 07:20:00 2019
@@ -80,6 +80,27 @@ static const MCSymbolRefExpr *getGlobalO
                                  Context);
 }
 
+// MI is an instruction that accepts an optional alignment hint,
+// and which was already lowered to LoweredMI.  If the alignment
+// of the original memory operand is known, update LoweredMI to
+// an instruction with the corresponding hint set.
+static void lowerAlignmentHint(const MachineInstr *MI, MCInst &LoweredMI,
+                               unsigned Opcode) {
+  if (!MI->hasOneMemOperand())
+    return;
+  const MachineMemOperand *MMO = *MI->memoperands_begin();
+  unsigned AlignmentHint = 0;
+  if (MMO->getAlignment() >= 16)
+    AlignmentHint = 4;
+  else if (MMO->getAlignment() >= 8)
+    AlignmentHint = 3;
+  if (AlignmentHint == 0)
+    return;
+
+  LoweredMI.setOpcode(Opcode);
+  LoweredMI.addOperand(MCOperand::createImm(AlignmentHint));
+}
+
 // MI loads the high part of a vector from memory.  Return an instruction
 // that uses replicating vector load Opcode to do the same thing.
 static MCInst lowerSubvectorLoad(const MachineInstr *MI, unsigned Opcode) {
@@ -351,6 +372,26 @@ void SystemZAsmPrinter::EmitInstruction(
       .addReg(SystemZMC::getRegAsVR128(MI->getOperand(1).getReg()));
     break;
 
+  case SystemZ::VL:
+    Lower.lower(MI, LoweredMI);
+    lowerAlignmentHint(MI, LoweredMI, SystemZ::VLAlign);
+    break;
+
+  case SystemZ::VST:
+    Lower.lower(MI, LoweredMI);
+    lowerAlignmentHint(MI, LoweredMI, SystemZ::VSTAlign);
+    break;
+
+  case SystemZ::VLM:
+    Lower.lower(MI, LoweredMI);
+    lowerAlignmentHint(MI, LoweredMI, SystemZ::VLMAlign);
+    break;
+
+  case SystemZ::VSTM:
+    Lower.lower(MI, LoweredMI);
+    lowerAlignmentHint(MI, LoweredMI, SystemZ::VSTMAlign);
+    break;
+
   case SystemZ::VL32:
     LoweredMI = lowerSubvectorLoad(MI, SystemZ::VLREPF);
     break;

Modified: llvm/trunk/lib/Target/SystemZ/SystemZInstrFormats.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZInstrFormats.td?rev=363806&r1=363805&r2=363806&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZInstrFormats.td (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZInstrFormats.td Wed Jun 19 07:20:00 2019
@@ -2425,11 +2425,16 @@ class LoadMultipleSSe<string mnemonic, b
   let mayLoad = 1;
 }
 
-class LoadMultipleVRSa<string mnemonic, bits<16> opcode>
-  : InstVRSa<opcode, (outs VR128:$V1, VR128:$V3), (ins bdaddr12only:$BD2),
-             mnemonic#"\t$V1, $V3, $BD2", []> {
-  let M4 = 0;
-  let mayLoad = 1;
+multiclass LoadMultipleVRSaAlign<string mnemonic, bits<16> opcode> {
+  let mayLoad = 1 in {
+    def Align : InstVRSa<opcode, (outs VR128:$V1, VR128:$V3),
+                        (ins bdaddr12only:$BD2, imm32zx4:$M4),
+                        mnemonic#"\t$V1, $V3, $BD2, $M4", []>;
+    let M4 = 0 in
+      def "" : InstVRSa<opcode, (outs VR128:$V1, VR128:$V3),
+                        (ins bdaddr12only:$BD2),
+                        mnemonic#"\t$V1, $V3, $BD2", []>;
+  }
 }
 
 class StoreRILPC<string mnemonic, bits<12> opcode, SDPatternOperator operator,
@@ -2490,6 +2495,17 @@ class StoreVRX<string mnemonic, bits<16>
   let AccessBytes = bytes;
 }
 
+multiclass StoreVRXAlign<string mnemonic, bits<16> opcode> {
+  let mayStore = 1, AccessBytes = 16 in {
+    def Align : InstVRX<opcode, (outs),
+                        (ins VR128:$V1, bdxaddr12only:$XBD2, imm32zx4:$M3),
+                        mnemonic#"\t$V1, $XBD2, $M3", []>;
+    let M3 = 0 in
+      def "" : InstVRX<opcode, (outs), (ins VR128:$V1, bdxaddr12only:$XBD2),
+                       mnemonic#"\t$V1, $XBD2", []>;
+  }
+}
+
 class StoreLengthVRSb<string mnemonic, bits<16> opcode,
                       SDPatternOperator operator, bits<5> bytes>
   : InstVRSb<opcode, (outs), (ins VR128:$V1, GR32:$R3, bdaddr12only:$BD2),
@@ -2542,11 +2558,16 @@ multiclass StoreMultipleRSPair<string mn
   }
 }
 
-class StoreMultipleVRSa<string mnemonic, bits<16> opcode>
-  : InstVRSa<opcode, (outs), (ins VR128:$V1, VR128:$V3, bdaddr12only:$BD2),
-             mnemonic#"\t$V1, $V3, $BD2", []> {
-  let M4 = 0;
-  let mayStore = 1;
+multiclass StoreMultipleVRSaAlign<string mnemonic, bits<16> opcode> {
+  let mayStore = 1 in {
+    def Align : InstVRSa<opcode, (outs), (ins VR128:$V1, VR128:$V3,
+                                              bdaddr12only:$BD2, imm32zx4:$M4),
+                         mnemonic#"\t$V1, $V3, $BD2, $M4", []>;
+    let M4 = 0 in
+      def "" : InstVRSa<opcode, (outs), (ins VR128:$V1, VR128:$V3,
+                                             bdaddr12only:$BD2),
+                        mnemonic#"\t$V1, $V3, $BD2", []>;
+  }
 }
 
 // StoreSI* instructions are used to store an integer to memory, but the
@@ -2940,6 +2961,17 @@ class UnaryVRXGeneric<string mnemonic, b
   let mayLoad = 1;
 }
 
+multiclass UnaryVRXAlign<string mnemonic, bits<16> opcode> {
+  let mayLoad = 1, AccessBytes = 16 in {
+    def Align : InstVRX<opcode, (outs VR128:$V1),
+                        (ins bdxaddr12only:$XBD2, imm32zx4:$M3),
+                        mnemonic#"\t$V1, $XBD2, $M3", []>;
+    let M3 = 0 in
+      def "" : InstVRX<opcode, (outs VR128:$V1), (ins bdxaddr12only:$XBD2),
+                       mnemonic#"\t$V1, $XBD2", []>;
+  }
+}
+
 class SideEffectBinaryRX<string mnemonic, bits<8> opcode,
                          RegisterOperand cls>
   : InstRXa<opcode, (outs), (ins cls:$R1, bdxaddr12only:$XBD2),

Modified: llvm/trunk/lib/Target/SystemZ/SystemZInstrVector.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZInstrVector.td?rev=363806&r1=363805&r2=363806&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZInstrVector.td (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZInstrVector.td Wed Jun 19 07:20:00 2019
@@ -103,7 +103,7 @@ let Predicates = [FeatureVector] in {
 
 let Predicates = [FeatureVector] in {
   // Load.
-  def VL : UnaryVRX<"vl", 0xE706, null_frag, v128any, 16>;
+  defm VL : UnaryVRXAlign<"vl", 0xE706>;
 
   // Load to block boundary.  The number of loaded bytes is only known
   // at run time.  The instruction is really polymorphic, but v128b matches
@@ -122,7 +122,7 @@ let Predicates = [FeatureVector] in {
   def VLL : BinaryVRSb<"vll", 0xE737, int_s390_vll, 0>;
 
   // Load multiple.
-  def VLM : LoadMultipleVRSa<"vlm", 0xE736>;
+  defm VLM : LoadMultipleVRSaAlign<"vlm", 0xE736>;
 
   // Load and replicate
   def VLREP  : UnaryVRXGeneric<"vlrep", 0xE705>;
@@ -207,13 +207,13 @@ defm : ReplicatePeephole<VLREPG, v2f64,
 
 let Predicates = [FeatureVector] in {
   // Store.
-  def VST : StoreVRX<"vst", 0xE70E, null_frag, v128any, 16>;
+  defm VST : StoreVRXAlign<"vst", 0xE70E>;
 
   // Store with length.  The number of stored bytes is only known at run time.
   def VSTL : StoreLengthVRSb<"vstl", 0xE73F, int_s390_vstl, 0>;
 
   // Store multiple.
-  def VSTM : StoreMultipleVRSa<"vstm", 0xE73E>;
+  defm VSTM : StoreMultipleVRSaAlign<"vstm", 0xE73E>;
 
   // Store element.
   def VSTEB : StoreBinaryVRX<"vsteb", 0xE708, z_vstei8,  v128b, 1, imm32zx4>;

Modified: llvm/trunk/lib/Target/SystemZ/SystemZScheduleZ13.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZScheduleZ13.td?rev=363806&r1=363805&r2=363806&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZScheduleZ13.td (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZScheduleZ13.td Wed Jun 19 07:20:00 2019
@@ -1191,8 +1191,8 @@ def : InstRW<[WLat2, VecXsPm, NormalGr],
 // Vector: Loads
 //===----------------------------------------------------------------------===//
 
-def : InstRW<[LSULatency, LSU, NormalGr], (instregex "VL(BB)?$")>;
-def : InstRW<[LSULatency, LSU, NormalGr], (instregex "VLL$")>;
+def : InstRW<[LSULatency, LSU, NormalGr], (instregex "VL(Align)?$")>;
+def : InstRW<[LSULatency, LSU, NormalGr], (instregex "VL(L|BB)$")>;
 def : InstRW<[LSULatency, LSU, NormalGr], (instregex "VL(32|64)$")>;
 def : InstRW<[LSULatency, LSU, NormalGr], (instregex "VLLEZ(B|F|G|H)?$")>;
 def : InstRW<[LSULatency, LSU, NormalGr], (instregex "VLREP(B|F|G|H)?$")>;
@@ -1200,16 +1200,17 @@ def : InstRW<[WLat2LSU, RegReadAdv, VecX
              (instregex "VLE(B|F|G|H)$")>;
 def : InstRW<[WLat6LSU, RegReadAdv, FXb, LSU, VecXsPm, Cracked],
              (instregex "VGE(F|G)$")>;
-def : InstRW<[WLat4LSU, WLat4LSU, LSU5, GroupAlone], (instregex "VLM$")>;
+def : InstRW<[WLat4LSU, WLat4LSU, LSU5, GroupAlone],
+             (instregex "VLM(Align)?$")>;
 
 //===----------------------------------------------------------------------===//
 // Vector: Stores
 //===----------------------------------------------------------------------===//
 
-def : InstRW<[WLat1, FXb, LSU, NormalGr], (instregex "VST(L|32|64)?$")>;
+def : InstRW<[WLat1, FXb, LSU, NormalGr], (instregex "VST(Align|L|32|64)?$")>;
 def : InstRW<[WLat1, FXb, LSU, NormalGr], (instregex "VSTE(F|G)$")>;
 def : InstRW<[WLat1, FXb, LSU, VecXsPm, Cracked], (instregex "VSTE(B|H)$")>;
-def : InstRW<[WLat1, LSU2, FXb3, GroupAlone2], (instregex "VSTM$")>;
+def : InstRW<[WLat1, LSU2, FXb3, GroupAlone2], (instregex "VSTM(Align)?$")>;
 def : InstRW<[WLat1, FXb2, LSU, Cracked], (instregex "VSCE(F|G)$")>;
 
 //===----------------------------------------------------------------------===//

Modified: llvm/trunk/lib/Target/SystemZ/SystemZScheduleZ14.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZScheduleZ14.td?rev=363806&r1=363805&r2=363806&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZScheduleZ14.td (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZScheduleZ14.td Wed Jun 19 07:20:00 2019
@@ -1209,8 +1209,8 @@ def : InstRW<[WLat2, VecXsPm, NormalGr],
 // Vector: Loads
 //===----------------------------------------------------------------------===//
 
-def : InstRW<[LSULatency, LSU, NormalGr], (instregex "VL(BB)?$")>;
-def : InstRW<[LSULatency, LSU, NormalGr], (instregex "VLL$")>;
+def : InstRW<[LSULatency, LSU, NormalGr], (instregex "VL(Align)?$")>;
+def : InstRW<[LSULatency, LSU, NormalGr], (instregex "VL(L|BB)$")>;
 def : InstRW<[LSULatency, LSU, NormalGr], (instregex "VL(32|64)$")>;
 def : InstRW<[LSULatency, LSU, NormalGr], (instregex "VLLEZ(B|F|G|H|LF)?$")>;
 def : InstRW<[LSULatency, LSU, NormalGr], (instregex "VLREP(B|F|G|H)?$")>;
@@ -1218,17 +1218,18 @@ def : InstRW<[WLat2LSU, RegReadAdv, VecX
              (instregex "VLE(B|F|G|H)$")>;
 def : InstRW<[WLat5LSU, RegReadAdv, FXb, LSU, VecXsPm, Cracked],
              (instregex "VGE(F|G)$")>;
-def : InstRW<[WLat4LSU, WLat4LSU, LSU5, GroupAlone], (instregex "VLM$")>;
+def : InstRW<[WLat4LSU, WLat4LSU, LSU5, GroupAlone],
+             (instregex "VLM(Align)?$")>;
 def : InstRW<[LSULatency, LSU, NormalGr], (instregex "VLRL(R)?$")>;
 
 //===----------------------------------------------------------------------===//
 // Vector: Stores
 //===----------------------------------------------------------------------===//
 
-def : InstRW<[WLat1, FXb, LSU, NormalGr], (instregex "VST(L|32|64)?$")>;
+def : InstRW<[WLat1, FXb, LSU, NormalGr], (instregex "VST(Align|L|32|64)?$")>;
 def : InstRW<[WLat1, FXb, LSU, NormalGr], (instregex "VSTE(F|G)$")>;
 def : InstRW<[WLat1, FXb, LSU, VecXsPm, Cracked], (instregex "VSTE(B|H)$")>;
-def : InstRW<[WLat1, LSU2, FXb3, GroupAlone2], (instregex "VSTM$")>;
+def : InstRW<[WLat1, LSU2, FXb3, GroupAlone2], (instregex "VSTM(Align)?$")>;
 def : InstRW<[WLat1, FXb2, LSU, Cracked], (instregex "VSCE(F|G)$")>;
 def : InstRW<[WLat1, FXb, LSU, NormalGr], (instregex "VSTRL(R)?$")>;
 

Modified: llvm/trunk/test/CodeGen/SystemZ/frame-19.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/frame-19.ll?rev=363806&r1=363805&r2=363806&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/frame-19.ll (original)
+++ llvm/trunk/test/CodeGen/SystemZ/frame-19.ll Wed Jun 19 07:20:00 2019
@@ -15,8 +15,8 @@ define void @f1(<16 x i8> *%ptr) {
 ; CHECK-DAG: std %f13,
 ; CHECK-DAG: std %f14,
 ; CHECK-DAG: std %f15,
-; CHECK: vst {{%v[0-9]+}}, 160(%r15)
-; CHECK: vl {{%v[0-9]+}}, 160(%r15)
+; CHECK: vst {{%v[0-9]+}}, 160(%r15), 3
+; CHECK: vl {{%v[0-9]+}}, 160(%r15), 3
 ; CHECK-DAG: ld %f8,
 ; CHECK-DAG: ld %f9,
 ; CHECK-DAG: ld %f10,

Modified: llvm/trunk/test/CodeGen/SystemZ/vec-move-02.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/vec-move-02.ll?rev=363806&r1=363805&r2=363806&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/vec-move-02.ll (original)
+++ llvm/trunk/test/CodeGen/SystemZ/vec-move-02.ll Wed Jun 19 07:20:00 2019
@@ -5,7 +5,7 @@
 ; Test v16i8 loads.
 define <16 x i8> @f1(<16 x i8> *%ptr) {
 ; CHECK-LABEL: f1:
-; CHECK: vl %v24, 0(%r2)
+; CHECK: vl %v24, 0(%r2), 3
 ; CHECK: br %r14
   %ret = load <16 x i8>, <16 x i8> *%ptr
   ret <16 x i8> %ret
@@ -14,7 +14,7 @@ define <16 x i8> @f1(<16 x i8> *%ptr) {
 ; Test v8i16 loads.
 define <8 x i16> @f2(<8 x i16> *%ptr) {
 ; CHECK-LABEL: f2:
-; CHECK: vl %v24, 0(%r2)
+; CHECK: vl %v24, 0(%r2), 3
 ; CHECK: br %r14
   %ret = load <8 x i16>, <8 x i16> *%ptr
   ret <8 x i16> %ret
@@ -23,7 +23,7 @@ define <8 x i16> @f2(<8 x i16> *%ptr) {
 ; Test v4i32 loads.
 define <4 x i32> @f3(<4 x i32> *%ptr) {
 ; CHECK-LABEL: f3:
-; CHECK: vl %v24, 0(%r2)
+; CHECK: vl %v24, 0(%r2), 3
 ; CHECK: br %r14
   %ret = load <4 x i32>, <4 x i32> *%ptr
   ret <4 x i32> %ret
@@ -32,7 +32,7 @@ define <4 x i32> @f3(<4 x i32> *%ptr) {
 ; Test v2i64 loads.
 define <2 x i64> @f4(<2 x i64> *%ptr) {
 ; CHECK-LABEL: f4:
-; CHECK: vl %v24, 0(%r2)
+; CHECK: vl %v24, 0(%r2), 3
 ; CHECK: br %r14
   %ret = load <2 x i64>, <2 x i64> *%ptr
   ret <2 x i64> %ret
@@ -41,7 +41,7 @@ define <2 x i64> @f4(<2 x i64> *%ptr) {
 ; Test v4f32 loads.
 define <4 x float> @f5(<4 x float> *%ptr) {
 ; CHECK-LABEL: f5:
-; CHECK: vl %v24, 0(%r2)
+; CHECK: vl %v24, 0(%r2), 3
 ; CHECK: br %r14
   %ret = load <4 x float>, <4 x float> *%ptr
   ret <4 x float> %ret
@@ -50,7 +50,7 @@ define <4 x float> @f5(<4 x float> *%ptr
 ; Test v2f64 loads.
 define <2 x double> @f6(<2 x double> *%ptr) {
 ; CHECK-LABEL: f6:
-; CHECK: vl %v24, 0(%r2)
+; CHECK: vl %v24, 0(%r2), 3
 ; CHECK: br %r14
   %ret = load <2 x double>, <2 x double> *%ptr
   ret <2 x double> %ret
@@ -59,7 +59,7 @@ define <2 x double> @f6(<2 x double> *%p
 ; Test the highest aligned in-range offset.
 define <16 x i8> @f7(<16 x i8> *%base) {
 ; CHECK-LABEL: f7:
-; CHECK: vl %v24, 4080(%r2)
+; CHECK: vl %v24, 4080(%r2), 3
 ; CHECK: br %r14
   %ptr = getelementptr <16 x i8>, <16 x i8> *%base, i64 255
   %ret = load <16 x i8>, <16 x i8> *%ptr
@@ -81,7 +81,7 @@ define <16 x i8> @f8(i8 *%base) {
 define <16 x i8> @f9(<16 x i8> *%base) {
 ; CHECK-LABEL: f9:
 ; CHECK: aghi %r2, 4096
-; CHECK: vl %v24, 0(%r2)
+; CHECK: vl %v24, 0(%r2), 3
 ; CHECK: br %r14
   %ptr = getelementptr <16 x i8>, <16 x i8> *%base, i64 256
   %ret = load <16 x i8>, <16 x i8> *%ptr
@@ -92,7 +92,7 @@ define <16 x i8> @f9(<16 x i8> *%base) {
 define <16 x i8> @f10(<16 x i8> *%base) {
 ; CHECK-LABEL: f10:
 ; CHECK: aghi %r2, -16
-; CHECK: vl %v24, 0(%r2)
+; CHECK: vl %v24, 0(%r2), 3
 ; CHECK: br %r14
   %ptr = getelementptr <16 x i8>, <16 x i8> *%base, i64 -1
   %ret = load <16 x i8>, <16 x i8> *%ptr
@@ -172,3 +172,13 @@ define <2 x float> @f18(<2 x float> *%pt
   %ret = load <2 x float>, <2 x float> *%ptr
   ret <2 x float> %ret
 }
+
+; Test quadword-aligned loads.
+define <16 x i8> @f19(<16 x i8> *%ptr) {
+; CHECK-LABEL: f19:
+; CHECK: vl %v24, 0(%r2), 4
+; CHECK: br %r14
+  %ret = load <16 x i8>, <16 x i8> *%ptr, align 16
+  ret <16 x i8> %ret
+}
+

Modified: llvm/trunk/test/CodeGen/SystemZ/vec-move-03.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/vec-move-03.ll?rev=363806&r1=363805&r2=363806&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/vec-move-03.ll (original)
+++ llvm/trunk/test/CodeGen/SystemZ/vec-move-03.ll Wed Jun 19 07:20:00 2019
@@ -5,7 +5,7 @@
 ; Test v16i8 stores.
 define void @f1(<16 x i8> %val, <16 x i8> *%ptr) {
 ; CHECK-LABEL: f1:
-; CHECK: vst %v24, 0(%r2)
+; CHECK: vst %v24, 0(%r2), 3
 ; CHECK: br %r14
   store <16 x i8> %val, <16 x i8> *%ptr
   ret void
@@ -14,7 +14,7 @@ define void @f1(<16 x i8> %val, <16 x i8
 ; Test v8i16 stores.
 define void @f2(<8 x i16> %val, <8 x i16> *%ptr) {
 ; CHECK-LABEL: f2:
-; CHECK: vst %v24, 0(%r2)
+; CHECK: vst %v24, 0(%r2), 3
 ; CHECK: br %r14
   store <8 x i16> %val, <8 x i16> *%ptr
   ret void
@@ -23,7 +23,7 @@ define void @f2(<8 x i16> %val, <8 x i16
 ; Test v4i32 stores.
 define void @f3(<4 x i32> %val, <4 x i32> *%ptr) {
 ; CHECK-LABEL: f3:
-; CHECK: vst %v24, 0(%r2)
+; CHECK: vst %v24, 0(%r2), 3
 ; CHECK: br %r14
   store <4 x i32> %val, <4 x i32> *%ptr
   ret void
@@ -32,7 +32,7 @@ define void @f3(<4 x i32> %val, <4 x i32
 ; Test v2i64 stores.
 define void @f4(<2 x i64> %val, <2 x i64> *%ptr) {
 ; CHECK-LABEL: f4:
-; CHECK: vst %v24, 0(%r2)
+; CHECK: vst %v24, 0(%r2), 3
 ; CHECK: br %r14
   store <2 x i64> %val, <2 x i64> *%ptr
   ret void
@@ -41,7 +41,7 @@ define void @f4(<2 x i64> %val, <2 x i64
 ; Test v4f32 stores.
 define void @f5(<4 x float> %val, <4 x float> *%ptr) {
 ; CHECK-LABEL: f5:
-; CHECK: vst %v24, 0(%r2)
+; CHECK: vst %v24, 0(%r2), 3
 ; CHECK: br %r14
   store <4 x float> %val, <4 x float> *%ptr
   ret void
@@ -50,7 +50,7 @@ define void @f5(<4 x float> %val, <4 x f
 ; Test v2f64 stores.
 define void @f6(<2 x double> %val, <2 x double> *%ptr) {
 ; CHECK-LABEL: f6:
-; CHECK: vst %v24, 0(%r2)
+; CHECK: vst %v24, 0(%r2), 3
 ; CHECK: br %r14
   store <2 x double> %val, <2 x double> *%ptr
   ret void
@@ -59,7 +59,7 @@ define void @f6(<2 x double> %val, <2 x
 ; Test the highest aligned in-range offset.
 define void @f7(<16 x i8> %val, <16 x i8> *%base) {
 ; CHECK-LABEL: f7:
-; CHECK: vst %v24, 4080(%r2)
+; CHECK: vst %v24, 4080(%r2), 3
 ; CHECK: br %r14
   %ptr = getelementptr <16 x i8>, <16 x i8> *%base, i64 255
   store <16 x i8> %val, <16 x i8> *%ptr
@@ -81,7 +81,7 @@ define void @f8(<16 x i8> %val, i8 *%bas
 define void @f9(<16 x i8> %val, <16 x i8> *%base) {
 ; CHECK-LABEL: f9:
 ; CHECK: aghi %r2, 4096
-; CHECK: vst %v24, 0(%r2)
+; CHECK: vst %v24, 0(%r2), 3
 ; CHECK: br %r14
   %ptr = getelementptr <16 x i8>, <16 x i8> *%base, i64 256
   store <16 x i8> %val, <16 x i8> *%ptr
@@ -92,7 +92,7 @@ define void @f9(<16 x i8> %val, <16 x i8
 define void @f10(<16 x i8> %val, <16 x i8> *%base) {
 ; CHECK-LABEL: f10:
 ; CHECK: aghi %r2, -16
-; CHECK: vst %v24, 0(%r2)
+; CHECK: vst %v24, 0(%r2), 3
 ; CHECK: br %r14
   %ptr = getelementptr <16 x i8>, <16 x i8> *%base, i64 -1
   store <16 x i8> %val, <16 x i8> *%ptr
@@ -172,3 +172,13 @@ define void @f18(<2 x float> %val, <2 x
   store <2 x float> %val, <2 x float> *%ptr
   ret void
 }
+
+; Test quadword-aligned stores.
+define void @f19(<16 x i8> %val, <16 x i8> *%ptr) {
+; CHECK-LABEL: f19:
+; CHECK: vst %v24, 0(%r2), 4
+; CHECK: br %r14
+  store <16 x i8> %val, <16 x i8> *%ptr, align 16
+  ret void
+}
+

Modified: llvm/trunk/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll?rev=363806&r1=363805&r2=363806&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll Wed Jun 19 07:20:00 2019
@@ -43,9 +43,9 @@ define <2 x double> @constrained_vector_
 ; SZ13-LABEL: constrained_vector_fdiv_v2f64:
 ; SZ13:       # %bb.0: # %entry
 ; SZ13-NEXT:    larl %r1, .LCPI1_0
-; SZ13-NEXT:    vl %v0, 0(%r1)
+; SZ13-NEXT:    vl %v0, 0(%r1), 3
 ; SZ13-NEXT:    larl %r1, .LCPI1_1
-; SZ13-NEXT:    vl %v1, 0(%r1)
+; SZ13-NEXT:    vl %v1, 0(%r1), 3
 ; SZ13-NEXT:    vfddb %v24, %v1, %v0
 ; SZ13-NEXT:    br %r14
 entry:
@@ -122,11 +122,11 @@ define void @constrained_vector_fdiv_v3f
 ; SZ13-NEXT:    ldeb %f1, 0(%r1)
 ; SZ13-NEXT:    ddb %f1, 16(%r2)
 ; SZ13-NEXT:    larl %r1, .LCPI3_1
-; SZ13-NEXT:    vl %v0, 0(%r2)
-; SZ13-NEXT:    vl %v2, 0(%r1)
+; SZ13-NEXT:    vl %v0, 0(%r2), 4
+; SZ13-NEXT:    vl %v2, 0(%r1), 3
 ; SZ13-NEXT:    std %f1, 16(%r2)
 ; SZ13-NEXT:    vfddb %v0, %v2, %v0
-; SZ13-NEXT:    vst %v0, 0(%r2)
+; SZ13-NEXT:    vst %v0, 0(%r2), 4
 ; SZ13-NEXT:    br %r14
 entry:
   %b = load <3 x double>, <3 x double>* %a
@@ -161,12 +161,12 @@ define <4 x double> @constrained_vector_
 ; SZ13-LABEL: constrained_vector_fdiv_v4f64:
 ; SZ13:       # %bb.0: # %entry
 ; SZ13-NEXT:    larl %r1, .LCPI4_0
-; SZ13-NEXT:    vl %v0, 0(%r1)
+; SZ13-NEXT:    vl %v0, 0(%r1), 3
 ; SZ13-NEXT:    larl %r1, .LCPI4_1
-; SZ13-NEXT:    vl %v1, 0(%r1)
+; SZ13-NEXT:    vl %v1, 0(%r1), 3
 ; SZ13-NEXT:    vfddb %v26, %v1, %v0
 ; SZ13-NEXT:    larl %r1, .LCPI4_2
-; SZ13-NEXT:    vl %v1, 0(%r1)
+; SZ13-NEXT:    vl %v1, 0(%r1), 3
 ; SZ13-NEXT:    vfddb %v24, %v1, %v0
 ; SZ13-NEXT:    br %r14
 entry:
@@ -266,11 +266,11 @@ define <2 x double> @constrained_vector_
 ; SZ13-NEXT:    ldr %f2, %f8
 ; SZ13-NEXT:    brasl %r14, fmod@PLT
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    vgmg %v0, 2, 11
 ; SZ13-NEXT:    ldr %f2, %f8
 ; SZ13-NEXT:    brasl %r14, fmod@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 176(%r15) # 8-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v24, %v0, %v1
@@ -342,19 +342,19 @@ define <3 x float> @constrained_vector_f
 ; SZ13-NEXT:    ldr %f2, %f8
 ; SZ13-NEXT:    brasl %r14, fmodf@PLT
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    vgmf %v0, 2, 8
 ; SZ13-NEXT:    ldr %f2, %f8
 ; SZ13-NEXT:    brasl %r14, fmodf@PLT
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    vgmf %v0, 1, 1
 ; SZ13-NEXT:    ldr %f2, %f8
 ; SZ13-NEXT:    brasl %r14, fmodf@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
 ; SZ13-NEXT:    vmrhf %v0, %v1, %v0
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 192(%r15) # 8-byte Folded Reload
 ; SZ13-NEXT:    vrepf %v1, %v1, 0
 ; SZ13-NEXT:    vmrhg %v24, %v0, %v1
@@ -422,32 +422,32 @@ define void @constrained_vector_frem_v3f
 ; SZ13-NEXT:    .cfi_def_cfa_offset 360
 ; SZ13-NEXT:    std %f8, 192(%r15) # 8-byte Folded Spill
 ; SZ13-NEXT:    .cfi_offset %f8, -168
-; SZ13-NEXT:    vl %v2, 0(%r2)
+; SZ13-NEXT:    vl %v2, 0(%r2), 4
 ; SZ13-NEXT:    ld %f8, 16(%r2)
 ; SZ13-NEXT:    vgmg %v0, 2, 11
 ; SZ13-NEXT:    lgr %r13, %r2
-; SZ13-NEXT:    vst %v2, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v2, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    # kill: def $f2d killed $f2d killed $v2
 ; SZ13-NEXT:    brasl %r14, fmod@PLT
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
-; SZ13-NEXT:    vl %v0, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    vrepg %v2, %v0, 1
 ; SZ13-NEXT:    vgmg %v0, 1, 1
 ; SZ13-NEXT:    # kill: def $f2d killed $f2d killed $v2
 ; SZ13-NEXT:    brasl %r14, fmod@PLT
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v1, %v0
 ; SZ13-NEXT:    larl %r1, .LCPI8_0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    ldr %f2, %f8
 ; SZ13-NEXT:    brasl %r14, fmod@PLT
 ; SZ13-NEXT:    std %f0, 16(%r13)
-; SZ13-NEXT:    vl %v0, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 192(%r15) # 8-byte Folded Reload
-; SZ13-NEXT:    vst %v0, 0(%r13)
+; SZ13-NEXT:    vst %v0, 0(%r13), 4
 ; SZ13-NEXT:    lmg %r13, %r15, 304(%r15)
 ; SZ13-NEXT:    br %r14
 entry:
@@ -527,26 +527,26 @@ define <4 x double> @constrained_vector_
 ; SZ13-NEXT:    ldr %f2, %f8
 ; SZ13-NEXT:    brasl %r14, fmod@PLT
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    vgmg %v0, 2, 11
 ; SZ13-NEXT:    ldr %f2, %f8
 ; SZ13-NEXT:    brasl %r14, fmod@PLT
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v0, %v1
 ; SZ13-NEXT:    larl %r1, .LCPI9_1
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    ldr %f2, %f8
 ; SZ13-NEXT:    brasl %r14, fmod@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI9_2
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    ldr %f2, %f8
 ; SZ13-NEXT:    brasl %r14, fmod@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
-; SZ13-NEXT:    vl %v24, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v24, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 192(%r15) # 8-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v26, %v0, %v1
@@ -603,9 +603,9 @@ define <2 x double> @constrained_vector_
 ; SZ13-LABEL: constrained_vector_fmul_v2f64:
 ; SZ13:       # %bb.0: # %entry
 ; SZ13-NEXT:    larl %r1, .LCPI11_0
-; SZ13-NEXT:    vl %v0, 0(%r1)
+; SZ13-NEXT:    vl %v0, 0(%r1), 3
 ; SZ13-NEXT:    larl %r1, .LCPI11_1
-; SZ13-NEXT:    vl %v1, 0(%r1)
+; SZ13-NEXT:    vl %v1, 0(%r1), 3
 ; SZ13-NEXT:    vfmdb %v24, %v1, %v0
 ; SZ13-NEXT:    br %r14
 entry:
@@ -677,12 +677,12 @@ define void @constrained_vector_fmul_v3f
 ; SZ13-NEXT:    larl %r1, .LCPI13_0
 ; SZ13-NEXT:    ld %f1, 0(%r1)
 ; SZ13-NEXT:    larl %r1, .LCPI13_1
-; SZ13-NEXT:    vl %v0, 0(%r2)
-; SZ13-NEXT:    vl %v2, 0(%r1)
+; SZ13-NEXT:    vl %v0, 0(%r2), 4
+; SZ13-NEXT:    vl %v2, 0(%r1), 3
 ; SZ13-NEXT:    mdb %f1, 16(%r2)
 ; SZ13-NEXT:    vfmdb %v0, %v2, %v0
 ; SZ13-NEXT:    std %f1, 16(%r2)
-; SZ13-NEXT:    vst %v0, 0(%r2)
+; SZ13-NEXT:    vst %v0, 0(%r2), 4
 ; SZ13-NEXT:    br %r14
 entry:
   %b = load <3 x double>, <3 x double>* %a
@@ -718,12 +718,12 @@ define <4 x double> @constrained_vector_
 ; SZ13-LABEL: constrained_vector_fmul_v4f64:
 ; SZ13:       # %bb.0: # %entry
 ; SZ13-NEXT:    larl %r1, .LCPI14_0
-; SZ13-NEXT:    vl %v0, 0(%r1)
+; SZ13-NEXT:    vl %v0, 0(%r1), 3
 ; SZ13-NEXT:    larl %r1, .LCPI14_1
-; SZ13-NEXT:    vl %v1, 0(%r1)
+; SZ13-NEXT:    vl %v1, 0(%r1), 3
 ; SZ13-NEXT:    larl %r1, .LCPI14_2
 ; SZ13-NEXT:    vfmdb %v26, %v1, %v0
-; SZ13-NEXT:    vl %v0, 0(%r1)
+; SZ13-NEXT:    vl %v0, 0(%r1), 3
 ; SZ13-NEXT:    vfmdb %v24, %v1, %v0
 ; SZ13-NEXT:    br %r14
 entry:
@@ -778,9 +778,9 @@ define <2 x double> @constrained_vector_
 ; SZ13-LABEL: constrained_vector_fadd_v2f64:
 ; SZ13:       # %bb.0: # %entry
 ; SZ13-NEXT:    larl %r1, .LCPI16_0
-; SZ13-NEXT:    vl %v0, 0(%r1)
+; SZ13-NEXT:    vl %v0, 0(%r1), 3
 ; SZ13-NEXT:    larl %r1, .LCPI16_1
-; SZ13-NEXT:    vl %v1, 0(%r1)
+; SZ13-NEXT:    vl %v1, 0(%r1), 3
 ; SZ13-NEXT:    vfadb %v24, %v1, %v0
 ; SZ13-NEXT:    br %r14
 entry:
@@ -850,12 +850,12 @@ define void @constrained_vector_fadd_v3f
 ; SZ13-NEXT:    larl %r1, .LCPI18_0
 ; SZ13-NEXT:    ld %f1, 0(%r1)
 ; SZ13-NEXT:    larl %r1, .LCPI18_1
-; SZ13-NEXT:    vl %v0, 0(%r2)
-; SZ13-NEXT:    vl %v2, 0(%r1)
+; SZ13-NEXT:    vl %v0, 0(%r2), 4
+; SZ13-NEXT:    vl %v2, 0(%r1), 3
 ; SZ13-NEXT:    adb %f1, 16(%r2)
 ; SZ13-NEXT:    vfadb %v0, %v2, %v0
 ; SZ13-NEXT:    std %f1, 16(%r2)
-; SZ13-NEXT:    vst %v0, 0(%r2)
+; SZ13-NEXT:    vst %v0, 0(%r2), 4
 ; SZ13-NEXT:    br %r14
 entry:
   %b = load <3 x double>, <3 x double>* %a
@@ -891,12 +891,12 @@ define <4 x double> @constrained_vector_
 ; SZ13-LABEL: constrained_vector_fadd_v4f64:
 ; SZ13:       # %bb.0: # %entry
 ; SZ13-NEXT:    larl %r1, .LCPI19_0
-; SZ13-NEXT:    vl %v0, 0(%r1)
+; SZ13-NEXT:    vl %v0, 0(%r1), 3
 ; SZ13-NEXT:    larl %r1, .LCPI19_1
-; SZ13-NEXT:    vl %v1, 0(%r1)
+; SZ13-NEXT:    vl %v1, 0(%r1), 3
 ; SZ13-NEXT:    larl %r1, .LCPI19_2
 ; SZ13-NEXT:    vfadb %v26, %v1, %v0
-; SZ13-NEXT:    vl %v0, 0(%r1)
+; SZ13-NEXT:    vl %v0, 0(%r1), 3
 ; SZ13-NEXT:    vfadb %v24, %v1, %v0
 ; SZ13-NEXT:    br %r14
 entry:
@@ -951,7 +951,7 @@ define <2 x double> @constrained_vector_
 ; SZ13-LABEL: constrained_vector_fsub_v2f64:
 ; SZ13:       # %bb.0: # %entry
 ; SZ13-NEXT:    larl %r1, .LCPI21_0
-; SZ13-NEXT:    vl %v0, 0(%r1)
+; SZ13-NEXT:    vl %v0, 0(%r1), 3
 ; SZ13-NEXT:    vgmg %v1, 12, 10
 ; SZ13-NEXT:    vfsdb %v24, %v1, %v0
 ; SZ13-NEXT:    br %r14
@@ -1023,13 +1023,13 @@ define void @constrained_vector_fsub_v3f
 ;
 ; SZ13-LABEL: constrained_vector_fsub_v3f64:
 ; SZ13:       # %bb.0: # %entry
-; SZ13-NEXT:    vl %v0, 0(%r2)
+; SZ13-NEXT:    vl %v0, 0(%r2), 4
 ; SZ13-NEXT:    vgmg %v2, 12, 10
 ; SZ13-NEXT:    sdb %f2, 16(%r2)
 ; SZ13-NEXT:    vgmg %v1, 12, 10
 ; SZ13-NEXT:    vfsdb %v0, %v1, %v0
 ; SZ13-NEXT:    std %f2, 16(%r2)
-; SZ13-NEXT:    vst %v0, 0(%r2)
+; SZ13-NEXT:    vst %v0, 0(%r2), 4
 ; SZ13-NEXT:    br %r14
 entry:
   %b = load <3 x double>, <3 x double>* %a
@@ -1066,11 +1066,11 @@ define <4 x double> @constrained_vector_
 ; SZ13-LABEL: constrained_vector_fsub_v4f64:
 ; SZ13:       # %bb.0: # %entry
 ; SZ13-NEXT:    larl %r1, .LCPI24_0
-; SZ13-NEXT:    vl %v0, 0(%r1)
+; SZ13-NEXT:    vl %v0, 0(%r1), 3
 ; SZ13-NEXT:    vgmg %v1, 12, 10
 ; SZ13-NEXT:    larl %r1, .LCPI24_1
 ; SZ13-NEXT:    vfsdb %v26, %v1, %v0
-; SZ13-NEXT:    vl %v0, 0(%r1)
+; SZ13-NEXT:    vl %v0, 0(%r1), 3
 ; SZ13-NEXT:    vfsdb %v24, %v1, %v0
 ; SZ13-NEXT:    br %r14
 entry:
@@ -1118,7 +1118,7 @@ define <2 x double> @constrained_vector_
 ; SZ13-LABEL: constrained_vector_sqrt_v2f64:
 ; SZ13:       # %bb.0: # %entry
 ; SZ13-NEXT:    larl %r1, .LCPI26_0
-; SZ13-NEXT:    vl %v0, 0(%r1)
+; SZ13-NEXT:    vl %v0, 0(%r1), 3
 ; SZ13-NEXT:    vfsqdb %v24, %v0
 ; SZ13-NEXT:    br %r14
 entry:
@@ -1176,10 +1176,10 @@ define void @constrained_vector_sqrt_v3f
 ; SZ13-LABEL: constrained_vector_sqrt_v3f64:
 ; SZ13:       # %bb.0: # %entry
 ; SZ13-NEXT:    sqdb %f1, 16(%r2)
-; SZ13-NEXT:    vl %v0, 0(%r2)
+; SZ13-NEXT:    vl %v0, 0(%r2), 4
 ; SZ13-NEXT:    std %f1, 16(%r2)
 ; SZ13-NEXT:    vfsqdb %v0, %v0
-; SZ13-NEXT:    vst %v0, 0(%r2)
+; SZ13-NEXT:    vst %v0, 0(%r2), 4
 ; SZ13-NEXT:    br %r14
 entry:
   %b = load <3 x double>, <3 x double>* %a
@@ -1208,10 +1208,10 @@ define <4 x double> @constrained_vector_
 ; SZ13-LABEL: constrained_vector_sqrt_v4f64:
 ; SZ13:       # %bb.0: # %entry
 ; SZ13-NEXT:    larl %r1, .LCPI29_0
-; SZ13-NEXT:    vl %v0, 0(%r1)
+; SZ13-NEXT:    vl %v0, 0(%r1), 3
 ; SZ13-NEXT:    vfsqdb %v26, %v0
 ; SZ13-NEXT:    larl %r1, .LCPI29_1
-; SZ13-NEXT:    vl %v0, 0(%r1)
+; SZ13-NEXT:    vl %v0, 0(%r1), 3
 ; SZ13-NEXT:    vfsqdb %v24, %v0
 ; SZ13-NEXT:    br %r14
  entry:
@@ -1312,11 +1312,11 @@ define <2 x double> @constrained_vector_
 ; SZ13-NEXT:    brasl %r14, pow@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI31_2
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    ldr %f2, %f8
 ; SZ13-NEXT:    brasl %r14, pow@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 176(%r15) # 8-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v24, %v0, %v1
@@ -1389,20 +1389,20 @@ define <3 x float> @constrained_vector_p
 ; SZ13-NEXT:    brasl %r14, powf@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI32_2
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    lde %f0, 0(%r1)
 ; SZ13-NEXT:    ldr %f2, %f8
 ; SZ13-NEXT:    brasl %r14, powf@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI32_3
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    lde %f0, 0(%r1)
 ; SZ13-NEXT:    ldr %f2, %f8
 ; SZ13-NEXT:    brasl %r14, powf@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
 ; SZ13-NEXT:    vmrhf %v0, %v1, %v0
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 192(%r15) # 8-byte Folded Reload
 ; SZ13-NEXT:    vrepf %v1, %v1, 0
 ; SZ13-NEXT:    vmrhg %v24, %v0, %v1
@@ -1474,32 +1474,32 @@ define void @constrained_vector_pow_v3f6
 ; SZ13-NEXT:    .cfi_offset %f9, -176
 ; SZ13-NEXT:    larl %r1, .LCPI33_0
 ; SZ13-NEXT:    ldeb %f9, 0(%r1)
-; SZ13-NEXT:    vl %v0, 0(%r2)
+; SZ13-NEXT:    vl %v0, 0(%r2), 4
 ; SZ13-NEXT:    ld %f8, 16(%r2)
 ; SZ13-NEXT:    ldr %f2, %f9
 ; SZ13-NEXT:    lgr %r13, %r2
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, pow@PLT
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
-; SZ13-NEXT:    vl %v0, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ldr %f2, %f9
 ; SZ13-NEXT:    vrepg %v0, %v0, 1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, pow@PLT
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v1, %v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldr %f0, %f8
 ; SZ13-NEXT:    ldr %f2, %f9
 ; SZ13-NEXT:    brasl %r14, pow@PLT
 ; SZ13-NEXT:    std %f0, 16(%r13)
-; SZ13-NEXT:    vl %v0, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 200(%r15) # 8-byte Folded Reload
 ; SZ13-NEXT:    ld %f9, 192(%r15) # 8-byte Folded Reload
-; SZ13-NEXT:    vst %v0, 0(%r13)
+; SZ13-NEXT:    vst %v0, 0(%r13), 4
 ; SZ13-NEXT:    lmg %r13, %r15, 312(%r15)
 ; SZ13-NEXT:    br %r14
 entry:
@@ -1581,26 +1581,26 @@ define <4 x double> @constrained_vector_
 ; SZ13-NEXT:    brasl %r14, pow@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI34_2
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    ldr %f2, %f8
 ; SZ13-NEXT:    brasl %r14, pow@PLT
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v0, %v1
 ; SZ13-NEXT:    larl %r1, .LCPI34_3
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    ldr %f2, %f8
 ; SZ13-NEXT:    brasl %r14, pow@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI34_4
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    ldr %f2, %f8
 ; SZ13-NEXT:    brasl %r14, pow@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
-; SZ13-NEXT:    vl %v24, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v24, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 192(%r15) # 8-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v26, %v0, %v1
@@ -1695,11 +1695,11 @@ define <2 x double> @constrained_vector_
 ; SZ13-NEXT:    brasl %r14, __powidf2@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI36_1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    lghi %r2, 3
 ; SZ13-NEXT:    brasl %r14, __powidf2@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v24, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 288(%r15)
@@ -1762,20 +1762,20 @@ define <3 x float> @constrained_vector_p
 ; SZ13-NEXT:    brasl %r14, __powisf2@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI37_1
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    lde %f0, 0(%r1)
 ; SZ13-NEXT:    lghi %r2, 3
 ; SZ13-NEXT:    brasl %r14, __powisf2@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI37_2
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    lde %f0, 0(%r1)
 ; SZ13-NEXT:    lghi %r2, 3
 ; SZ13-NEXT:    brasl %r14, __powisf2@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
 ; SZ13-NEXT:    vmrhf %v0, %v1, %v0
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    vrepf %v1, %v1, 0
 ; SZ13-NEXT:    vmrhg %v24, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)
@@ -1842,21 +1842,21 @@ define void @constrained_vector_powi_v3f
 ; SZ13-NEXT:    brasl %r14, __powidf2@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI38_1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    lghi %r2, 3
 ; SZ13-NEXT:    brasl %r14, __powidf2@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v0, %v1
 ; SZ13-NEXT:    larl %r1, .LCPI38_2
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    lghi %r2, 3
 ; SZ13-NEXT:    brasl %r14, __powidf2@PLT
 ; SZ13-NEXT:    std %f0, 16(%r13)
-; SZ13-NEXT:    vl %v0, 160(%r15) # 16-byte Folded Reload
-; SZ13-NEXT:    vst %v0, 0(%r13)
+; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vst %v0, 0(%r13), 4
 ; SZ13-NEXT:    lmg %r13, %r15, 280(%r15)
 ; SZ13-NEXT:    br %r14
 entry:
@@ -1929,26 +1929,26 @@ define <4 x double> @constrained_vector_
 ; SZ13-NEXT:    brasl %r14, __powidf2@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI39_1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    lghi %r2, 3
 ; SZ13-NEXT:    brasl %r14, __powidf2@PLT
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v0, %v1
 ; SZ13-NEXT:    larl %r1, .LCPI39_2
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    lghi %r2, 3
 ; SZ13-NEXT:    brasl %r14, __powidf2@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI39_3
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    lghi %r2, 3
 ; SZ13-NEXT:    brasl %r14, __powidf2@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
-; SZ13-NEXT:    vl %v24, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v24, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v26, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)
@@ -2035,10 +2035,10 @@ define <2 x double> @constrained_vector_
 ; SZ13-NEXT:    brasl %r14, sin@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI41_1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, sin@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v24, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 288(%r15)
@@ -2096,18 +2096,18 @@ define <3 x float> @constrained_vector_s
 ; SZ13-NEXT:    brasl %r14, sinf@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI42_1
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    lde %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, sinf@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI42_2
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    lde %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, sinf@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
 ; SZ13-NEXT:    vmrhf %v0, %v1, %v0
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    vrepf %v1, %v1, 0
 ; SZ13-NEXT:    vmrhg %v24, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)
@@ -2165,28 +2165,28 @@ define void @constrained_vector_sin_v3f6
 ; SZ13-NEXT:    .cfi_def_cfa_offset 360
 ; SZ13-NEXT:    std %f8, 192(%r15) # 8-byte Folded Spill
 ; SZ13-NEXT:    .cfi_offset %f8, -168
-; SZ13-NEXT:    vl %v0, 0(%r2)
+; SZ13-NEXT:    vl %v0, 0(%r2), 4
 ; SZ13-NEXT:    ld %f8, 16(%r2)
 ; SZ13-NEXT:    lgr %r13, %r2
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, sin@PLT
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
-; SZ13-NEXT:    vl %v0, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    vrepg %v0, %v0, 1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, sin@PLT
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v1, %v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldr %f0, %f8
 ; SZ13-NEXT:    brasl %r14, sin@PLT
 ; SZ13-NEXT:    std %f0, 16(%r13)
-; SZ13-NEXT:    vl %v0, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 192(%r15) # 8-byte Folded Reload
-; SZ13-NEXT:    vst %v0, 0(%r13)
+; SZ13-NEXT:    vst %v0, 0(%r13), 4
 ; SZ13-NEXT:    lmg %r13, %r15, 304(%r15)
 ; SZ13-NEXT:    br %r14
 entry:
@@ -2253,23 +2253,23 @@ define <4 x double> @constrained_vector_
 ; SZ13-NEXT:    brasl %r14, sin@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI44_1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, sin@PLT
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v0, %v1
 ; SZ13-NEXT:    larl %r1, .LCPI44_2
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, sin@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI44_3
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, sin@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
-; SZ13-NEXT:    vl %v24, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v24, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v26, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)
@@ -2355,10 +2355,10 @@ define <2 x double> @constrained_vector_
 ; SZ13-NEXT:    brasl %r14, cos@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI46_1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, cos@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v24, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 288(%r15)
@@ -2416,18 +2416,18 @@ define <3 x float> @constrained_vector_c
 ; SZ13-NEXT:    brasl %r14, cosf@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI47_1
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    lde %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, cosf@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI47_2
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    lde %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, cosf@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
 ; SZ13-NEXT:    vmrhf %v0, %v1, %v0
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    vrepf %v1, %v1, 0
 ; SZ13-NEXT:    vmrhg %v24, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)
@@ -2485,28 +2485,28 @@ define void @constrained_vector_cos_v3f6
 ; SZ13-NEXT:    .cfi_def_cfa_offset 360
 ; SZ13-NEXT:    std %f8, 192(%r15) # 8-byte Folded Spill
 ; SZ13-NEXT:    .cfi_offset %f8, -168
-; SZ13-NEXT:    vl %v0, 0(%r2)
+; SZ13-NEXT:    vl %v0, 0(%r2), 4
 ; SZ13-NEXT:    ld %f8, 16(%r2)
 ; SZ13-NEXT:    lgr %r13, %r2
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, cos@PLT
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
-; SZ13-NEXT:    vl %v0, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    vrepg %v0, %v0, 1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, cos@PLT
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v1, %v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldr %f0, %f8
 ; SZ13-NEXT:    brasl %r14, cos@PLT
 ; SZ13-NEXT:    std %f0, 16(%r13)
-; SZ13-NEXT:    vl %v0, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 192(%r15) # 8-byte Folded Reload
-; SZ13-NEXT:    vst %v0, 0(%r13)
+; SZ13-NEXT:    vst %v0, 0(%r13), 4
 ; SZ13-NEXT:    lmg %r13, %r15, 304(%r15)
 ; SZ13-NEXT:    br %r14
 entry:
@@ -2573,23 +2573,23 @@ define <4 x double> @constrained_vector_
 ; SZ13-NEXT:    brasl %r14, cos@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI49_1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, cos@PLT
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v0, %v1
 ; SZ13-NEXT:    larl %r1, .LCPI49_2
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, cos@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI49_3
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, cos@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
-; SZ13-NEXT:    vl %v24, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v24, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v26, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)
@@ -2675,10 +2675,10 @@ define <2 x double> @constrained_vector_
 ; SZ13-NEXT:    brasl %r14, exp@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI51_1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, exp@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v24, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 288(%r15)
@@ -2736,18 +2736,18 @@ define <3 x float> @constrained_vector_e
 ; SZ13-NEXT:    brasl %r14, expf@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI52_1
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    lde %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, expf@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI52_2
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    lde %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, expf@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
 ; SZ13-NEXT:    vmrhf %v0, %v1, %v0
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    vrepf %v1, %v1, 0
 ; SZ13-NEXT:    vmrhg %v24, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)
@@ -2805,28 +2805,28 @@ define void @constrained_vector_exp_v3f6
 ; SZ13-NEXT:    .cfi_def_cfa_offset 360
 ; SZ13-NEXT:    std %f8, 192(%r15) # 8-byte Folded Spill
 ; SZ13-NEXT:    .cfi_offset %f8, -168
-; SZ13-NEXT:    vl %v0, 0(%r2)
+; SZ13-NEXT:    vl %v0, 0(%r2), 4
 ; SZ13-NEXT:    ld %f8, 16(%r2)
 ; SZ13-NEXT:    lgr %r13, %r2
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, exp@PLT
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
-; SZ13-NEXT:    vl %v0, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    vrepg %v0, %v0, 1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, exp@PLT
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v1, %v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldr %f0, %f8
 ; SZ13-NEXT:    brasl %r14, exp@PLT
 ; SZ13-NEXT:    std %f0, 16(%r13)
-; SZ13-NEXT:    vl %v0, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 192(%r15) # 8-byte Folded Reload
-; SZ13-NEXT:    vst %v0, 0(%r13)
+; SZ13-NEXT:    vst %v0, 0(%r13), 4
 ; SZ13-NEXT:    lmg %r13, %r15, 304(%r15)
 ; SZ13-NEXT:    br %r14
 entry:
@@ -2893,23 +2893,23 @@ define <4 x double> @constrained_vector_
 ; SZ13-NEXT:    brasl %r14, exp@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI54_1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, exp@PLT
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v0, %v1
 ; SZ13-NEXT:    larl %r1, .LCPI54_2
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, exp@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI54_3
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, exp@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
-; SZ13-NEXT:    vl %v24, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v24, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v26, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)
@@ -2995,10 +2995,10 @@ define <2 x double> @constrained_vector_
 ; SZ13-NEXT:    brasl %r14, exp2@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI56_1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, exp2@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v24, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 288(%r15)
@@ -3056,18 +3056,18 @@ define <3 x float> @constrained_vector_e
 ; SZ13-NEXT:    brasl %r14, exp2f@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI57_1
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    lde %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, exp2f@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI57_2
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    lde %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, exp2f@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
 ; SZ13-NEXT:    vmrhf %v0, %v1, %v0
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    vrepf %v1, %v1, 0
 ; SZ13-NEXT:    vmrhg %v24, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)
@@ -3125,28 +3125,28 @@ define void @constrained_vector_exp2_v3f
 ; SZ13-NEXT:    .cfi_def_cfa_offset 360
 ; SZ13-NEXT:    std %f8, 192(%r15) # 8-byte Folded Spill
 ; SZ13-NEXT:    .cfi_offset %f8, -168
-; SZ13-NEXT:    vl %v0, 0(%r2)
+; SZ13-NEXT:    vl %v0, 0(%r2), 4
 ; SZ13-NEXT:    ld %f8, 16(%r2)
 ; SZ13-NEXT:    lgr %r13, %r2
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, exp2@PLT
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
-; SZ13-NEXT:    vl %v0, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    vrepg %v0, %v0, 1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, exp2@PLT
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v1, %v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldr %f0, %f8
 ; SZ13-NEXT:    brasl %r14, exp2@PLT
 ; SZ13-NEXT:    std %f0, 16(%r13)
-; SZ13-NEXT:    vl %v0, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 192(%r15) # 8-byte Folded Reload
-; SZ13-NEXT:    vst %v0, 0(%r13)
+; SZ13-NEXT:    vst %v0, 0(%r13), 4
 ; SZ13-NEXT:    lmg %r13, %r15, 304(%r15)
 ; SZ13-NEXT:    br %r14
 entry:
@@ -3213,23 +3213,23 @@ define <4 x double> @constrained_vector_
 ; SZ13-NEXT:    brasl %r14, exp2@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI59_1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, exp2@PLT
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v0, %v1
 ; SZ13-NEXT:    larl %r1, .LCPI59_2
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, exp2@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI59_3
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, exp2@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
-; SZ13-NEXT:    vl %v24, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v24, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v26, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)
@@ -3315,10 +3315,10 @@ define <2 x double> @constrained_vector_
 ; SZ13-NEXT:    brasl %r14, log@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI61_1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, log@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v24, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 288(%r15)
@@ -3376,18 +3376,18 @@ define <3 x float> @constrained_vector_l
 ; SZ13-NEXT:    brasl %r14, logf@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI62_1
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    lde %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, logf@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI62_2
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    lde %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, logf@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
 ; SZ13-NEXT:    vmrhf %v0, %v1, %v0
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    vrepf %v1, %v1, 0
 ; SZ13-NEXT:    vmrhg %v24, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)
@@ -3445,28 +3445,28 @@ define void @constrained_vector_log_v3f6
 ; SZ13-NEXT:    .cfi_def_cfa_offset 360
 ; SZ13-NEXT:    std %f8, 192(%r15) # 8-byte Folded Spill
 ; SZ13-NEXT:    .cfi_offset %f8, -168
-; SZ13-NEXT:    vl %v0, 0(%r2)
+; SZ13-NEXT:    vl %v0, 0(%r2), 4
 ; SZ13-NEXT:    ld %f8, 16(%r2)
 ; SZ13-NEXT:    lgr %r13, %r2
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, log@PLT
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
-; SZ13-NEXT:    vl %v0, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    vrepg %v0, %v0, 1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, log@PLT
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v1, %v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldr %f0, %f8
 ; SZ13-NEXT:    brasl %r14, log@PLT
 ; SZ13-NEXT:    std %f0, 16(%r13)
-; SZ13-NEXT:    vl %v0, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 192(%r15) # 8-byte Folded Reload
-; SZ13-NEXT:    vst %v0, 0(%r13)
+; SZ13-NEXT:    vst %v0, 0(%r13), 4
 ; SZ13-NEXT:    lmg %r13, %r15, 304(%r15)
 ; SZ13-NEXT:    br %r14
 entry:
@@ -3533,23 +3533,23 @@ define <4 x double> @constrained_vector_
 ; SZ13-NEXT:    brasl %r14, log@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI64_1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, log@PLT
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v0, %v1
 ; SZ13-NEXT:    larl %r1, .LCPI64_2
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, log@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI64_3
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, log@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
-; SZ13-NEXT:    vl %v24, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v24, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v26, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)
@@ -3635,10 +3635,10 @@ define <2 x double> @constrained_vector_
 ; SZ13-NEXT:    brasl %r14, log10@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI66_1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, log10@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v24, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 288(%r15)
@@ -3696,18 +3696,18 @@ define <3 x float> @constrained_vector_l
 ; SZ13-NEXT:    brasl %r14, log10f@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI67_1
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    lde %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, log10f@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI67_2
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    lde %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, log10f@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
 ; SZ13-NEXT:    vmrhf %v0, %v1, %v0
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    vrepf %v1, %v1, 0
 ; SZ13-NEXT:    vmrhg %v24, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)
@@ -3765,28 +3765,28 @@ define void @constrained_vector_log10_v3
 ; SZ13-NEXT:    .cfi_def_cfa_offset 360
 ; SZ13-NEXT:    std %f8, 192(%r15) # 8-byte Folded Spill
 ; SZ13-NEXT:    .cfi_offset %f8, -168
-; SZ13-NEXT:    vl %v0, 0(%r2)
+; SZ13-NEXT:    vl %v0, 0(%r2), 4
 ; SZ13-NEXT:    ld %f8, 16(%r2)
 ; SZ13-NEXT:    lgr %r13, %r2
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, log10@PLT
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
-; SZ13-NEXT:    vl %v0, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    vrepg %v0, %v0, 1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, log10@PLT
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v1, %v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldr %f0, %f8
 ; SZ13-NEXT:    brasl %r14, log10@PLT
 ; SZ13-NEXT:    std %f0, 16(%r13)
-; SZ13-NEXT:    vl %v0, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 192(%r15) # 8-byte Folded Reload
-; SZ13-NEXT:    vst %v0, 0(%r13)
+; SZ13-NEXT:    vst %v0, 0(%r13), 4
 ; SZ13-NEXT:    lmg %r13, %r15, 304(%r15)
 ; SZ13-NEXT:    br %r14
 entry:
@@ -3853,23 +3853,23 @@ define <4 x double> @constrained_vector_
 ; SZ13-NEXT:    brasl %r14, log10@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI69_1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, log10@PLT
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v0, %v1
 ; SZ13-NEXT:    larl %r1, .LCPI69_2
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, log10@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI69_3
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, log10@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
-; SZ13-NEXT:    vl %v24, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v24, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v26, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)
@@ -3955,10 +3955,10 @@ define <2 x double> @constrained_vector_
 ; SZ13-NEXT:    brasl %r14, log2@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI71_1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, log2@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v24, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 288(%r15)
@@ -4016,18 +4016,18 @@ define <3 x float> @constrained_vector_l
 ; SZ13-NEXT:    brasl %r14, log2f@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI72_1
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    lde %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, log2f@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI72_2
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    lde %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, log2f@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
 ; SZ13-NEXT:    vmrhf %v0, %v1, %v0
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    vrepf %v1, %v1, 0
 ; SZ13-NEXT:    vmrhg %v24, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)
@@ -4085,28 +4085,28 @@ define void @constrained_vector_log2_v3f
 ; SZ13-NEXT:    .cfi_def_cfa_offset 360
 ; SZ13-NEXT:    std %f8, 192(%r15) # 8-byte Folded Spill
 ; SZ13-NEXT:    .cfi_offset %f8, -168
-; SZ13-NEXT:    vl %v0, 0(%r2)
+; SZ13-NEXT:    vl %v0, 0(%r2), 4
 ; SZ13-NEXT:    ld %f8, 16(%r2)
 ; SZ13-NEXT:    lgr %r13, %r2
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, log2@PLT
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
-; SZ13-NEXT:    vl %v0, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    vrepg %v0, %v0, 1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, log2@PLT
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v1, %v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldr %f0, %f8
 ; SZ13-NEXT:    brasl %r14, log2@PLT
 ; SZ13-NEXT:    std %f0, 16(%r13)
-; SZ13-NEXT:    vl %v0, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 192(%r15) # 8-byte Folded Reload
-; SZ13-NEXT:    vst %v0, 0(%r13)
+; SZ13-NEXT:    vst %v0, 0(%r13), 4
 ; SZ13-NEXT:    lmg %r13, %r15, 304(%r15)
 ; SZ13-NEXT:    br %r14
 entry:
@@ -4173,23 +4173,23 @@ define <4 x double> @constrained_vector_
 ; SZ13-NEXT:    brasl %r14, log2@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI74_1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, log2@PLT
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v0, %v1
 ; SZ13-NEXT:    larl %r1, .LCPI74_2
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, log2@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI74_3
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ld %f0, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, log2@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
-; SZ13-NEXT:    vl %v24, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v24, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v26, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)
@@ -4240,7 +4240,7 @@ define <2 x double> @constrained_vector_
 ; SZ13-LABEL: constrained_vector_rint_v2f64:
 ; SZ13:       # %bb.0: # %entry
 ; SZ13-NEXT:    larl %r1, .LCPI76_0
-; SZ13-NEXT:    vl %v0, 0(%r1)
+; SZ13-NEXT:    vl %v0, 0(%r1), 3
 ; SZ13-NEXT:    vfidb %v24, %v0, 0, 0
 ; SZ13-NEXT:    br %r14
 entry:
@@ -4304,12 +4304,12 @@ define void @constrained_vector_rint_v3f
 ;
 ; SZ13-LABEL: constrained_vector_rint_v3f64:
 ; SZ13:       # %bb.0: # %entry
-; SZ13-NEXT:    vl %v1, 0(%r2)
+; SZ13-NEXT:    vl %v1, 0(%r2), 4
 ; SZ13-NEXT:    ld %f0, 16(%r2)
 ; SZ13-NEXT:    vfidb %v1, %v1, 0, 0
 ; SZ13-NEXT:    fidbra %f0, 0, %f0, 0
 ; SZ13-NEXT:    std %f0, 16(%r2)
-; SZ13-NEXT:    vst %v1, 0(%r2)
+; SZ13-NEXT:    vst %v1, 0(%r2), 4
 ; SZ13-NEXT:    br %r14
 entry:
   %b = load <3 x double>, <3 x double>* %a
@@ -4341,10 +4341,10 @@ define <4 x double> @constrained_vector_
 ; SZ13-LABEL: constrained_vector_rint_v4f64:
 ; SZ13:       # %bb.0: # %entry
 ; SZ13-NEXT:    larl %r1, .LCPI79_0
-; SZ13-NEXT:    vl %v0, 0(%r1)
+; SZ13-NEXT:    vl %v0, 0(%r1), 3
 ; SZ13-NEXT:    larl %r1, .LCPI79_1
 ; SZ13-NEXT:    vfidb %v24, %v0, 0, 0
-; SZ13-NEXT:    vl %v0, 0(%r1)
+; SZ13-NEXT:    vl %v0, 0(%r1), 3
 ; SZ13-NEXT:    vfidb %v26, %v0, 0, 0
 ; SZ13-NEXT:    br %r14
 entry:
@@ -4412,7 +4412,7 @@ define <2 x double> @constrained_vector_
 ; SZ13-LABEL: constrained_vector_nearbyint_v2f64:
 ; SZ13:       # %bb.0: # %entry
 ; SZ13-NEXT:    larl %r1, .LCPI81_0
-; SZ13-NEXT:    vl %v0, 0(%r1)
+; SZ13-NEXT:    vl %v0, 0(%r1), 3
 ; SZ13-NEXT:    vfidb %v24, %v0, 4, 0
 ; SZ13-NEXT:    br %r14
 entry:
@@ -4516,12 +4516,12 @@ define void @constrained_vector_nearbyin
 ;
 ; SZ13-LABEL: constrained_vector_nearbyint_v3f64:
 ; SZ13:       # %bb.0: # %entry
-; SZ13-NEXT:    vl %v1, 0(%r2)
+; SZ13-NEXT:    vl %v1, 0(%r2), 4
 ; SZ13-NEXT:    ld %f0, 16(%r2)
 ; SZ13-NEXT:    vfidb %v1, %v1, 4, 0
 ; SZ13-NEXT:    fidbra %f0, 0, %f0, 4
 ; SZ13-NEXT:    std %f0, 16(%r2)
-; SZ13-NEXT:    vst %v1, 0(%r2)
+; SZ13-NEXT:    vst %v1, 0(%r2), 4
 ; SZ13-NEXT:    br %r14
 entry:
   %b = load <3 x double>, <3 x double>* %a
@@ -4578,10 +4578,10 @@ define <4 x double> @constrained_vector_
 ; SZ13-LABEL: constrained_vector_nearbyint_v4f64:
 ; SZ13:       # %bb.0: # %entry
 ; SZ13-NEXT:    larl %r1, .LCPI84_0
-; SZ13-NEXT:    vl %v0, 0(%r1)
+; SZ13-NEXT:    vl %v0, 0(%r1), 3
 ; SZ13-NEXT:    larl %r1, .LCPI84_1
 ; SZ13-NEXT:    vfidb %v24, %v0, 4, 0
-; SZ13-NEXT:    vl %v0, 0(%r1)
+; SZ13-NEXT:    vl %v0, 0(%r1), 3
 ; SZ13-NEXT:    vfidb %v26, %v0, 4, 0
 ; SZ13-NEXT:    br %r14
 entry:
@@ -4675,12 +4675,12 @@ define <2 x double> @constrained_vector_
 ; SZ13-NEXT:    brasl %r14, fmax@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI86_2
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    larl %r1, .LCPI86_3
 ; SZ13-NEXT:    ldeb %f2, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, fmax@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v24, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 288(%r15)
@@ -4754,20 +4754,20 @@ define <3 x float> @constrained_vector_m
 ; SZ13-NEXT:    larl %r1, .LCPI87_2
 ; SZ13-NEXT:    lde %f2, 0(%r1)
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldr %f0, %f8
 ; SZ13-NEXT:    brasl %r14, fmaxf@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI87_3
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    lde %f0, 0(%r1)
 ; SZ13-NEXT:    larl %r1, .LCPI87_4
 ; SZ13-NEXT:    lde %f2, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, fmaxf@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
 ; SZ13-NEXT:    vmrhf %v0, %v1, %v0
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 192(%r15) # 8-byte Folded Reload
 ; SZ13-NEXT:    vrepf %v1, %v1, 0
 ; SZ13-NEXT:    vmrhg %v24, %v0, %v1
@@ -4835,32 +4835,32 @@ define void @constrained_vector_log10_ma
 ; SZ13-NEXT:    .cfi_offset %f8, -168
 ; SZ13-NEXT:    larl %r1, .LCPI88_0
 ; SZ13-NEXT:    ldeb %f2, 0(%r1)
-; SZ13-NEXT:    vl %v0, 0(%r2)
+; SZ13-NEXT:    vl %v0, 0(%r2), 4
 ; SZ13-NEXT:    ld %f8, 16(%r2)
 ; SZ13-NEXT:    lgr %r13, %r2
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, fmax@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI88_1
 ; SZ13-NEXT:    ldeb %f2, 0(%r1)
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
-; SZ13-NEXT:    vl %v0, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    vrepg %v0, %v0, 1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, fmax@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI88_2
 ; SZ13-NEXT:    ldeb %f2, 0(%r1)
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v1, %v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldr %f0, %f8
 ; SZ13-NEXT:    brasl %r14, fmax@PLT
 ; SZ13-NEXT:    std %f0, 16(%r13)
-; SZ13-NEXT:    vl %v0, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 192(%r15) # 8-byte Folded Reload
-; SZ13-NEXT:    vst %v0, 0(%r13)
+; SZ13-NEXT:    vst %v0, 0(%r13), 4
 ; SZ13-NEXT:    lmg %r13, %r15, 304(%r15)
 ; SZ13-NEXT:    br %r14
 entry:
@@ -4938,29 +4938,29 @@ define <4 x double> @constrained_vector_
 ; SZ13-NEXT:    brasl %r14, fmax@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI89_2
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    larl %r1, .LCPI89_3
 ; SZ13-NEXT:    ldeb %f2, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, fmax@PLT
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v0, %v1
 ; SZ13-NEXT:    larl %r1, .LCPI89_4
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    larl %r1, .LCPI89_5
 ; SZ13-NEXT:    ldeb %f2, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, fmax@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI89_6
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    larl %r1, .LCPI89_7
 ; SZ13-NEXT:    ldeb %f2, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, fmax@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
-; SZ13-NEXT:    vl %v24, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v24, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v26, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)
@@ -5058,12 +5058,12 @@ define <2 x double> @constrained_vector_
 ; SZ13-NEXT:    brasl %r14, fmin@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI91_2
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    larl %r1, .LCPI91_3
 ; SZ13-NEXT:    ldeb %f2, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, fmin@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v24, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 288(%r15)
@@ -5137,20 +5137,20 @@ define <3 x float> @constrained_vector_m
 ; SZ13-NEXT:    larl %r1, .LCPI92_2
 ; SZ13-NEXT:    lde %f2, 0(%r1)
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldr %f0, %f8
 ; SZ13-NEXT:    brasl %r14, fminf@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI92_3
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    lde %f0, 0(%r1)
 ; SZ13-NEXT:    larl %r1, .LCPI92_4
 ; SZ13-NEXT:    lde %f2, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, fminf@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0s killed $f0s def $v0
 ; SZ13-NEXT:    vmrhf %v0, %v1, %v0
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 192(%r15) # 8-byte Folded Reload
 ; SZ13-NEXT:    vrepf %v1, %v1, 0
 ; SZ13-NEXT:    vmrhg %v24, %v0, %v1
@@ -5222,32 +5222,32 @@ define void @constrained_vector_minnum_v
 ; SZ13-NEXT:    .cfi_offset %f9, -176
 ; SZ13-NEXT:    larl %r1, .LCPI93_0
 ; SZ13-NEXT:    ldeb %f9, 0(%r1)
-; SZ13-NEXT:    vl %v0, 0(%r2)
+; SZ13-NEXT:    vl %v0, 0(%r2), 4
 ; SZ13-NEXT:    ld %f8, 16(%r2)
 ; SZ13-NEXT:    ldr %f2, %f9
 ; SZ13-NEXT:    lgr %r13, %r2
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, fmin@PLT
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
-; SZ13-NEXT:    vl %v0, 160(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
+; SZ13-NEXT:    vl %v0, 160(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ldr %f2, %f9
 ; SZ13-NEXT:    vrepg %v0, %v0, 1
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d killed $v0
 ; SZ13-NEXT:    brasl %r14, fmin@PLT
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v1, %v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldr %f0, %f8
 ; SZ13-NEXT:    ldr %f2, %f9
 ; SZ13-NEXT:    brasl %r14, fmin@PLT
 ; SZ13-NEXT:    std %f0, 16(%r13)
-; SZ13-NEXT:    vl %v0, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v0, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    ld %f8, 200(%r15) # 8-byte Folded Reload
 ; SZ13-NEXT:    ld %f9, 192(%r15) # 8-byte Folded Reload
-; SZ13-NEXT:    vst %v0, 0(%r13)
+; SZ13-NEXT:    vst %v0, 0(%r13), 4
 ; SZ13-NEXT:    lmg %r13, %r15, 312(%r15)
 ; SZ13-NEXT:    br %r14
 entry:
@@ -5325,29 +5325,29 @@ define <4 x double> @constrained_vector_
 ; SZ13-NEXT:    brasl %r14, fmin@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI94_2
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    larl %r1, .LCPI94_3
 ; SZ13-NEXT:    ldeb %f2, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, fmin@PLT
-; SZ13-NEXT:    vl %v1, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v0, %v0, %v1
 ; SZ13-NEXT:    larl %r1, .LCPI94_4
-; SZ13-NEXT:    vst %v0, 176(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 176(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    larl %r1, .LCPI94_5
 ; SZ13-NEXT:    ldeb %f2, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, fmin@PLT
 ; SZ13-NEXT:    larl %r1, .LCPI94_6
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
-; SZ13-NEXT:    vst %v0, 160(%r15) # 16-byte Folded Spill
+; SZ13-NEXT:    vst %v0, 160(%r15), 3 # 16-byte Folded Spill
 ; SZ13-NEXT:    ldeb %f0, 0(%r1)
 ; SZ13-NEXT:    larl %r1, .LCPI94_7
 ; SZ13-NEXT:    ldeb %f2, 0(%r1)
 ; SZ13-NEXT:    brasl %r14, fmin@PLT
-; SZ13-NEXT:    vl %v1, 160(%r15) # 16-byte Folded Reload
-; SZ13-NEXT:    vl %v24, 176(%r15) # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v1, 160(%r15), 3 # 16-byte Folded Reload
+; SZ13-NEXT:    vl %v24, 176(%r15), 3 # 16-byte Folded Reload
 ; SZ13-NEXT:    # kill: def $f0d killed $f0d def $v0
 ; SZ13-NEXT:    vmrhg %v26, %v0, %v1
 ; SZ13-NEXT:    lmg %r14, %r15, 304(%r15)
@@ -5435,7 +5435,7 @@ define void @constrained_vector_fptrunc_
 ;
 ; SZ13-LABEL: constrained_vector_fptrunc_v3f64:
 ; SZ13:       # %bb.0: # %entry
-; SZ13-NEXT:    vl %v1, 0(%r2)
+; SZ13-NEXT:    vl %v1, 0(%r2), 4
 ; SZ13-NEXT:    ledbra %f2, 0, %f1, 0
 ; SZ13-NEXT:    vrepg %v1, %v1, 1
 ; SZ13-NEXT:    ld %f0, 16(%r2)
@@ -5568,7 +5568,7 @@ define void @constrained_vector_fpext_v3
 ;
 ; SZ13-LABEL: constrained_vector_fpext_v3f64:
 ; SZ13:       # %bb.0: # %entry
-; SZ13-NEXT:    vl %v0, 0(%r2)
+; SZ13-NEXT:    vl %v0, 0(%r2), 4
 ; SZ13-NEXT:    vrepf %v2, %v0, 1
 ; SZ13-NEXT:    ldebr %f1, %f0
 ; SZ13-NEXT:    ldebr %f2, %f2
@@ -5576,7 +5576,7 @@ define void @constrained_vector_fpext_v3
 ; SZ13-NEXT:    ldebr %f0, %f0
 ; SZ13-NEXT:    vmrhg %v1, %v1, %v2
 ; SZ13-NEXT:    std %f0, 16(%r3)
-; SZ13-NEXT:    vst %v1, 0(%r3)
+; SZ13-NEXT:    vst %v1, 0(%r3), 4
 ; SZ13-NEXT:    br %r14
 entry:
   %b = load <3 x float>, <3 x float>* %src
@@ -5684,7 +5684,7 @@ define <2 x double> @constrained_vector_
 ; SZ13-LABEL: constrained_vector_ceil_v2f64:
 ; SZ13:       # %bb.0: # %entry
 ; SZ13-NEXT:    larl %r1, .LCPI104_0
-; SZ13-NEXT:    vl %v0, 0(%r1)
+; SZ13-NEXT:    vl %v0, 0(%r1), 3
 ; SZ13-NEXT:    vfidb %v24, %v0, 4, 6
 ; SZ13-NEXT:    br %r14
 entry:
@@ -5787,12 +5787,12 @@ define void @constrained_vector_ceil_v3f
 ;
 ; SZ13-LABEL: constrained_vector_ceil_v3f64:
 ; SZ13:       # %bb.0: # %entry
-; SZ13-NEXT:    vl %v1, 0(%r2)
+; SZ13-NEXT:    vl %v1, 0(%r2), 4
 ; SZ13-NEXT:    ld %f0, 16(%r2)
 ; SZ13-NEXT:    vfidb %v1, %v1, 4, 6
 ; SZ13-NEXT:    fidbra %f0, 6, %f0, 4
 ; SZ13-NEXT:    std %f0, 16(%r2)
-; SZ13-NEXT:    vst %v1, 0(%r2)
+; SZ13-NEXT:    vst %v1, 0(%r2), 4
 ; SZ13-NEXT:    br %r14
 entry:
   %b = load <3 x double>, <3 x double>* %a
@@ -5860,7 +5860,7 @@ define <2 x double> @constrained_vector_
 ; SZ13-LABEL: constrained_vector_floor_v2f64:
 ; SZ13:       # %bb.0: # %entry
 ; SZ13-NEXT:    larl %r1, .LCPI108_0
-; SZ13-NEXT:    vl %v0, 0(%r1)
+; SZ13-NEXT:    vl %v0, 0(%r1), 3
 ; SZ13-NEXT:    vfidb %v24, %v0, 4, 7
 ; SZ13-NEXT:    br %r14
 entry:
@@ -5963,12 +5963,12 @@ define void @constrained_vector_floor_v3
 ;
 ; SZ13-LABEL: constrained_vector_floor_v3f64:
 ; SZ13:       # %bb.0: # %entry
-; SZ13-NEXT:    vl %v1, 0(%r2)
+; SZ13-NEXT:    vl %v1, 0(%r2), 4
 ; SZ13-NEXT:    ld %f0, 16(%r2)
 ; SZ13-NEXT:    vfidb %v1, %v1, 4, 7
 ; SZ13-NEXT:    fidbra %f0, 7, %f0, 4
 ; SZ13-NEXT:    std %f0, 16(%r2)
-; SZ13-NEXT:    vst %v1, 0(%r2)
+; SZ13-NEXT:    vst %v1, 0(%r2), 4
 ; SZ13-NEXT:    br %r14
 entry:
   %b = load <3 x double>, <3 x double>* %a
@@ -6035,7 +6035,7 @@ define <2 x double> @constrained_vector_
 ; SZ13-LABEL: constrained_vector_round_v2f64:
 ; SZ13:       # %bb.0: # %entry
 ; SZ13-NEXT:    larl %r1, .LCPI112_0
-; SZ13-NEXT:    vl %v0, 0(%r1)
+; SZ13-NEXT:    vl %v0, 0(%r1), 3
 ; SZ13-NEXT:    vfidb %v24, %v0, 4, 1
 ; SZ13-NEXT:    br %r14
 entry:
@@ -6139,12 +6139,12 @@ define void @constrained_vector_round_v3
 ;
 ; SZ13-LABEL: constrained_vector_round_v3f64:
 ; SZ13:       # %bb.0: # %entry
-; SZ13-NEXT:    vl %v1, 0(%r2)
+; SZ13-NEXT:    vl %v1, 0(%r2), 4
 ; SZ13-NEXT:    ld %f0, 16(%r2)
 ; SZ13-NEXT:    vfidb %v1, %v1, 4, 1
 ; SZ13-NEXT:    fidbra %f0, 1, %f0, 4
 ; SZ13-NEXT:    std %f0, 16(%r2)
-; SZ13-NEXT:    vst %v1, 0(%r2)
+; SZ13-NEXT:    vst %v1, 0(%r2), 4
 ; SZ13-NEXT:    br %r14
 entry:
   %b = load <3 x double>, <3 x double>* %a
@@ -6211,7 +6211,7 @@ define <2 x double> @constrained_vector_
 ; SZ13-LABEL: constrained_vector_trunc_v2f64:
 ; SZ13:       # %bb.0: # %entry
 ; SZ13-NEXT:    larl %r1, .LCPI116_0
-; SZ13-NEXT:    vl %v0, 0(%r1)
+; SZ13-NEXT:    vl %v0, 0(%r1), 3
 ; SZ13-NEXT:    vfidb %v24, %v0, 4, 5
 ; SZ13-NEXT:    br %r14
 entry:
@@ -6314,12 +6314,12 @@ define void @constrained_vector_trunc_v3
 ;
 ; SZ13-LABEL: constrained_vector_trunc_v3f64:
 ; SZ13:       # %bb.0: # %entry
-; SZ13-NEXT:    vl %v1, 0(%r2)
+; SZ13-NEXT:    vl %v1, 0(%r2), 4
 ; SZ13-NEXT:    ld %f0, 16(%r2)
 ; SZ13-NEXT:    vfidb %v1, %v1, 4, 5
 ; SZ13-NEXT:    fidbra %f0, 5, %f0, 4
 ; SZ13-NEXT:    std %f0, 16(%r2)
-; SZ13-NEXT:    vst %v1, 0(%r2)
+; SZ13-NEXT:    vst %v1, 0(%r2), 4
 ; SZ13-NEXT:    br %r14
 entry:
   %b = load <3 x double>, <3 x double>* %a

Modified: llvm/trunk/test/MC/Disassembler/SystemZ/insns-z13.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/Disassembler/SystemZ/insns-z13.txt?rev=363806&r1=363805&r2=363806&view=diff
==============================================================================
--- llvm/trunk/test/MC/Disassembler/SystemZ/insns-z13.txt (original)
+++ llvm/trunk/test/MC/Disassembler/SystemZ/insns-z13.txt Wed Jun 19 07:20:00 2019
@@ -2363,6 +2363,9 @@
 # CHECK: vl  %v0, 0
 0xe7 0x00 0x00 0x00 0x00 0x06
 
+# CHECK: vl  %v0, 0, 4
+0xe7 0x00 0x00 0x00 0x40 0x06
+
 # CHECK: vl  %v17, 2475(%r7,%r8)
 0xe7 0x17 0x89 0xab 0x08 0x06
 
@@ -2633,6 +2636,9 @@
 # CHECK: vlm  %v0, %v0, 0
 0xe7 0x00 0x00 0x00 0x00 0x36
 
+# CHECK: vlm  %v0, %v0, 0, 4
+0xe7 0x00 0x00 0x00 0x40 0x36
+
 # CHECK: vlm  %v12, %v18, 1110(%r3)
 0xe7 0xc2 0x34 0x56 0x04 0x36
 
@@ -4118,6 +4124,9 @@
 # CHECK: vst  %v0, 0
 0xe7 0x00 0x00 0x00 0x00 0x0E
 
+# CHECK: vst  %v0, 0, 4
+0xe7 0x00 0x00 0x00 0x40 0x0E
+
 # CHECK: vst  %v17, 2475(%r7,%r8)
 0xe7 0x17 0x89 0xab 0x08 0x0E
 
@@ -4172,6 +4181,9 @@
 # CHECK: vstm %v0, %v0, 0
 0xe7 0x00 0x00 0x00 0x00 0x3e
 
+# CHECK: vstm %v0, %v0, 0, 4
+0xe7 0x00 0x00 0x00 0x40 0x3e
+
 # CHECK: vstm %v12, %v18, 1110(%r3)
 0xe7 0xc2 0x34 0x56 0x04 0x3e
 

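[Editorial note, not part of the patch: the disassembler checks above pin down where the hint lives in the encoding. Comparing "vl %v0, 0" (0xe7 0x00 0x00 0x00 0x00 0x06) with "vl %v0, 0, 4" (0xe7 0x00 0x00 0x00 0x40 0x06), and the hinted "vl %v18, 1383(%r3,%r4), 3" encoding [0xe7,0x23,0x45,0x67,0x38,0x06] exercised in the assembler test further below, the hint occupies the high nibble of the fifth instruction byte, next to the register-extension (RXB) bits in the low nibble. The C++ sketch below simply decodes that nibble from a raw 6-byte encoding; it is illustrative only and the helper name is made up, not code from LLVM.]

  #include <cstdint>
  #include <cstdio>

  // Decode the alignment hint from a 6-byte 0xE7-prefixed vector
  // load/store encoding, following the byte patterns in the tests:
  // the hint is the high nibble of the fifth byte; the low nibble
  // holds the RXB register-extension bits.  (Sketch only.)
  static unsigned getAlignmentHint(const uint8_t (&Enc)[6]) {
    return Enc[4] >> 4;
  }

  int main() {
    const uint8_t VLNoHint[6] = {0xe7, 0x00, 0x00, 0x00, 0x00, 0x06};
    const uint8_t VLHint4[6]  = {0xe7, 0x00, 0x00, 0x00, 0x40, 0x06};
    const uint8_t VLHint3[6]  = {0xe7, 0x23, 0x45, 0x67, 0x38, 0x06};
    std::printf("%u %u %u\n",
                getAlignmentHint(VLNoHint),   // prints 0 (no hint)
                getAlignmentHint(VLHint4),    // prints 4
                getAlignmentHint(VLHint3));   // prints 3
    return 0;
  }
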
Modified: llvm/trunk/test/MC/SystemZ/insn-bad-z13.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/SystemZ/insn-bad-z13.s?rev=363806&r1=363805&r2=363806&view=diff
==============================================================================
--- llvm/trunk/test/MC/SystemZ/insn-bad-z13.s (original)
+++ llvm/trunk/test/MC/SystemZ/insn-bad-z13.s Wed Jun 19 07:20:00 2019
@@ -1686,10 +1686,16 @@
 #CHECK: vl	%v0, 4096
 #CHECK: error: invalid use of vector addressing
 #CHECK: vl	%v0, 0(%v1,%r2)
+#CHECK: error: invalid operand
+#CHECK: vl	%v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vl	%v0, 0, 16
 
 	vl	%v0, -1
 	vl	%v0, 4096
 	vl	%v0, 0(%v1,%r2)
+	vl	%v0, 0, -1
+	vl	%v0, 0, 16
 
 #CHECK: error: invalid operand
 #CHECK: vlbb	%v0, 0, -1
@@ -2013,9 +2019,15 @@
 #CHECK: vlm	%v0, %v0, -1
 #CHECK: error: invalid operand
 #CHECK: vlm	%v0, %v0, 4096
+#CHECK: error: invalid operand
+#CHECK: vlm	%v0, %v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vlm	%v0, %v0, 0, 16
 
 	vlm	%v0, %v0, -1
 	vlm	%v0, %v0, 4096
+	vlm	%v0, %v0, 0, -1
+	vlm	%v0, %v0, 0, 16
 
 #CHECK: error: invalid operand
 #CHECK: vlrep	%v0, 0, -1
@@ -2380,10 +2392,16 @@
 #CHECK: vst	%v0, 4096
 #CHECK: error: invalid use of vector addressing
 #CHECK: vst	%v0, 0(%v1,%r2)
+#CHECK: error: invalid operand
+#CHECK: vst	%v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vst	%v0, 0, 16
 
 	vst	%v0, -1
 	vst	%v0, 4096
 	vst	%v0, 0(%v1,%r2)
+	vst	%v0, 0, -1
+	vst	%v0, 0, 16
 
 #CHECK: error: invalid operand
 #CHECK: vsteb	%v0, 0, -1
@@ -2468,9 +2486,15 @@
 #CHECK: vstm	%v0, %v0, -1
 #CHECK: error: invalid operand
 #CHECK: vstm	%v0, %v0, 4096
+#CHECK: error: invalid operand
+#CHECK: vstm	%v0, %v0, 0, -1
+#CHECK: error: invalid operand
+#CHECK: vstm	%v0, %v0, 0, 16
 
 	vstm	%v0, %v0, -1
 	vstm	%v0, %v0, 4096
+	vstm	%v0, %v0, 0, -1
+	vstm	%v0, %v0, 0, 16
 
 #CHECK: error: invalid operand
 #CHECK: vstrc    %v0, %v0, %v0, %v0, 0, -1

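[Editorial note, not part of the patch: the new negative tests above suggest the optional hint is parsed as a 4-bit unsigned immediate, since both -1 and 16 draw "error: invalid operand" while 3 and 4 are accepted elsewhere. A minimal illustrative range check, not the patch's actual operand-validation code:]

  #include <cstdint>

  // Accept only values that fit a 4-bit alignment-hint field,
  // consistent with the -1/16 rejections exercised above.  Sketch only;
  // the function name is made up for illustration.
  static bool isValidAlignmentHint(int64_t Value) {
    return Value >= 0 && Value <= 15;
  }
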
Modified: llvm/trunk/test/MC/SystemZ/insn-good-z13.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/SystemZ/insn-good-z13.s?rev=363806&r1=363805&r2=363806&view=diff
==============================================================================
--- llvm/trunk/test/MC/SystemZ/insn-good-z13.s (original)
+++ llvm/trunk/test/MC/SystemZ/insn-good-z13.s Wed Jun 19 07:20:00 2019
@@ -3215,17 +3215,19 @@
 #CHECK: vl      %v0, 4095               # encoding: [0xe7,0x00,0x0f,0xff,0x00,0x06]
 #CHECK: vl      %v0, 0(%r15)            # encoding: [0xe7,0x00,0xf0,0x00,0x00,0x06]
 #CHECK: vl      %v0, 0(%r15,%r1)        # encoding: [0xe7,0x0f,0x10,0x00,0x00,0x06]
+#CHECK: vl      %v0, 0(%r15,%r1), 4     # encoding: [0xe7,0x0f,0x10,0x00,0x40,0x06]
 #CHECK: vl      %v15, 0                 # encoding: [0xe7,0xf0,0x00,0x00,0x00,0x06]
 #CHECK: vl      %v31, 0                 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x06]
-#CHECK: vl      %v18, 1383(%r3,%r4)     # encoding: [0xe7,0x23,0x45,0x67,0x08,0x06]
+#CHECK: vl      %v18, 1383(%r3,%r4), 3  # encoding: [0xe7,0x23,0x45,0x67,0x38,0x06]
 
 	vl	%v0, 0
 	vl	%v0, 4095
 	vl	%v0, 0(%r15)
 	vl	%v0, 0(%r15,%r1)
+	vl	%v0, 0(%r15,%r1), 4
 	vl	%v15, 0
 	vl	%v31, 0
-	vl	%v18, 0x567(%r3,%r4)
+	vl	%v18, 0x567(%r3,%r4), 3
 
 #CHECK: vlbb    %v0, 0, 0               # encoding: [0xe7,0x00,0x00,0x00,0x00,0x07]
 #CHECK: vlbb    %v0, 0, 15              # encoding: [0xe7,0x00,0x00,0x00,0xf0,0x07]
@@ -3702,16 +3704,18 @@
 #CHECK: vlm     %v0, %v0, 0             # encoding: [0xe7,0x00,0x00,0x00,0x00,0x36]
 #CHECK: vlm     %v0, %v0, 4095          # encoding: [0xe7,0x00,0x0f,0xff,0x00,0x36]
 #CHECK: vlm     %v0, %v0, 0(%r15)       # encoding: [0xe7,0x00,0xf0,0x00,0x00,0x36]
+#CHECK: vlm     %v0, %v0, 0(%r15), 4    # encoding: [0xe7,0x00,0xf0,0x00,0x40,0x36]
 #CHECK: vlm     %v0, %v31, 0            # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x36]
 #CHECK: vlm     %v31, %v0, 0            # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x36]
-#CHECK: vlm     %v14, %v17, 1074(%r5)   # encoding: [0xe7,0xe1,0x54,0x32,0x04,0x36]
+#CHECK: vlm     %v14, %v17, 1074(%r5), 3 # encoding: [0xe7,0xe1,0x54,0x32,0x34,0x36]
 
 	vlm	%v0, %v0, 0
 	vlm	%v0, %v0, 4095
 	vlm	%v0, %v0, 0(%r15)
+	vlm	%v0, %v0, 0(%r15), 4
 	vlm	%v0, %v31, 0
 	vlm	%v31, %v0, 0
-	vlm	%v14, %v17, 1074(%r5)
+	vlm	%v14, %v17, 1074(%r5), 3
 
 #CHECK: vlp     %v0, %v0, 0             # encoding: [0xe7,0x00,0x00,0x00,0x00,0xdf]
 #CHECK: vlp     %v0, %v0, 15            # encoding: [0xe7,0x00,0x00,0x00,0xf0,0xdf]
@@ -6081,17 +6085,19 @@
 #CHECK: vst     %v0, 4095               # encoding: [0xe7,0x00,0x0f,0xff,0x00,0x0e]
 #CHECK: vst     %v0, 0(%r15)            # encoding: [0xe7,0x00,0xf0,0x00,0x00,0x0e]
 #CHECK: vst     %v0, 0(%r15,%r1)        # encoding: [0xe7,0x0f,0x10,0x00,0x00,0x0e]
+#CHECK: vst     %v0, 0(%r15,%r1), 4     # encoding: [0xe7,0x0f,0x10,0x00,0x40,0x0e]
 #CHECK: vst     %v15, 0                 # encoding: [0xe7,0xf0,0x00,0x00,0x00,0x0e]
 #CHECK: vst     %v31, 0                 # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x0e]
-#CHECK: vst     %v18, 1383(%r3,%r4)     # encoding: [0xe7,0x23,0x45,0x67,0x08,0x0e]
+#CHECK: vst     %v18, 1383(%r3,%r4), 3  # encoding: [0xe7,0x23,0x45,0x67,0x38,0x0e]
 
 	vst	%v0, 0
 	vst	%v0, 4095
 	vst	%v0, 0(%r15)
 	vst	%v0, 0(%r15,%r1)
+	vst	%v0, 0(%r15,%r1), 4
 	vst	%v15, 0
 	vst	%v31, 0
-	vst	%v18, 0x567(%r3,%r4)
+	vst	%v18, 0x567(%r3,%r4), 3
 
 #CHECK: vsteb   %v0, 0, 0               # encoding: [0xe7,0x00,0x00,0x00,0x00,0x08]
 #CHECK: vsteb   %v0, 0, 15              # encoding: [0xe7,0x00,0x00,0x00,0xf0,0x08]
@@ -6184,16 +6190,18 @@
 #CHECK: vstm    %v0, %v0, 0             # encoding: [0xe7,0x00,0x00,0x00,0x00,0x3e]
 #CHECK: vstm    %v0, %v0, 4095          # encoding: [0xe7,0x00,0x0f,0xff,0x00,0x3e]
 #CHECK: vstm    %v0, %v0, 0(%r15)       # encoding: [0xe7,0x00,0xf0,0x00,0x00,0x3e]
+#CHECK: vstm    %v0, %v0, 0(%r15), 4    # encoding: [0xe7,0x00,0xf0,0x00,0x40,0x3e]
 #CHECK: vstm    %v0, %v31, 0            # encoding: [0xe7,0x0f,0x00,0x00,0x04,0x3e]
 #CHECK: vstm    %v31, %v0, 0            # encoding: [0xe7,0xf0,0x00,0x00,0x08,0x3e]
-#CHECK: vstm    %v14, %v17, 1074(%r5)   # encoding: [0xe7,0xe1,0x54,0x32,0x04,0x3e]
+#CHECK: vstm    %v14, %v17, 1074(%r5), 3 # encoding: [0xe7,0xe1,0x54,0x32,0x34,0x3e]
 
 	vstm	%v0, %v0, 0
 	vstm	%v0, %v0, 4095
 	vstm	%v0, %v0, 0(%r15)
+	vstm	%v0, %v0, 0(%r15), 4
 	vstm	%v0, %v31, 0
 	vstm	%v31, %v0, 0
-	vstm	%v14, %v17, 1074(%r5)
+	vstm	%v14, %v17, 1074(%r5), 3
 
 #CHECK: vstrc    %v0, %v0, %v0, %v0, 0, 0    # encoding: [0xe7,0x00,0x00,0x00,0x00,0x8a]
 #CHECK: vstrc    %v0, %v0, %v0, %v0, 15, 0   # encoding: [0xe7,0x00,0x0f,0x00,0x00,0x8a]



