[llvm] f3b5597 - [RISCV] Use larger copies when register tuples are aligned

via llvm-commits llvm-commits at lists.llvm.org
Sun Apr 7 22:25:01 PDT 2024


Author: Pengcheng Wang
Date: 2024-04-08T13:24:57+08:00
New Revision: f3b55973645d551d67af7662b815a5f415874ff7

URL: https://github.com/llvm/llvm-project/commit/f3b55973645d551d67af7662b815a5f415874ff7
DIFF: https://github.com/llvm/llvm-project/commit/f3b55973645d551d67af7662b815a5f415874ff7.diff

LOG: [RISCV] Use larger copies when register tuples are aligned

When the encodings of register tuples are aligned, we can use copies with a
larger LMUL to reduce the number of copies.
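
For example (a minimal before/after sketch drawn from the updated
zvlsseg-copy.mir test below): copying the tuple $v10_v11_v12_v13 into
$v6_v7_v8_v9 previously expanded to four VMV1R_V instructions, but since both
encodings are 2-aligned it can now be emitted as two LMUL2 whole-register
copies:

    $v6m2 = VMV2R_V $v10m2
    $v8m2 = VMV2R_V $v12m2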

Reviewers: preames, topperc, lukel97

Reviewed By: topperc, lukel97

Pull Request: https://github.com/llvm/llvm-project/pull/84455

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
    llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
    llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 5582de51b17d19..a1befaf40d09f7 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -302,95 +302,108 @@ void RISCVInstrInfo::copyPhysRegVector(MachineBasicBlock &MBB,
                                        RISCVII::VLMUL LMul, unsigned NF) const {
   const TargetRegisterInfo *TRI = STI.getRegisterInfo();
 
-  unsigned Opc;
-  unsigned SubRegIdx;
-  unsigned VVOpc, VIOpc;
-  switch (LMul) {
-  default:
-    llvm_unreachable("Impossible LMUL for vector register copy.");
-  case RISCVII::LMUL_1:
-    Opc = RISCV::VMV1R_V;
-    SubRegIdx = RISCV::sub_vrm1_0;
-    VVOpc = RISCV::PseudoVMV_V_V_M1;
-    VIOpc = RISCV::PseudoVMV_V_I_M1;
-    break;
-  case RISCVII::LMUL_2:
-    Opc = RISCV::VMV2R_V;
-    SubRegIdx = RISCV::sub_vrm2_0;
-    VVOpc = RISCV::PseudoVMV_V_V_M2;
-    VIOpc = RISCV::PseudoVMV_V_I_M2;
-    break;
-  case RISCVII::LMUL_4:
-    Opc = RISCV::VMV4R_V;
-    SubRegIdx = RISCV::sub_vrm4_0;
-    VVOpc = RISCV::PseudoVMV_V_V_M4;
-    VIOpc = RISCV::PseudoVMV_V_I_M4;
-    break;
-  case RISCVII::LMUL_8:
-    assert(NF == 1);
-    Opc = RISCV::VMV8R_V;
-    SubRegIdx = RISCV::sub_vrm1_0; // There is no sub_vrm8_0.
-    VVOpc = RISCV::PseudoVMV_V_V_M8;
-    VIOpc = RISCV::PseudoVMV_V_I_M8;
-    break;
-  }
-
-  bool UseVMV_V_V = false;
-  bool UseVMV_V_I = false;
-  MachineBasicBlock::const_iterator DefMBBI;
-  if (isConvertibleToVMV_V_V(STI, MBB, MBBI, DefMBBI, LMul)) {
-    UseVMV_V_V = true;
-    Opc = VVOpc;
-
-    if (DefMBBI->getOpcode() == VIOpc) {
-      UseVMV_V_I = true;
-      Opc = VIOpc;
+  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
+  uint16_t DstEncoding = TRI->getEncodingValue(DstReg);
+  auto [LMulVal, Fractional] = RISCVVType::decodeVLMUL(LMul);
+  assert(!Fractional && "It is impossible be fractional lmul here.");
+  unsigned NumRegs = NF * LMulVal;
+  bool ReversedCopy =
+      forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NumRegs);
+  if (ReversedCopy) {
+    // If the src and dest overlap when copying a tuple, we need to copy the
+    // registers in reverse.
+    SrcEncoding += NumRegs - 1;
+    DstEncoding += NumRegs - 1;
+  }
+
+  unsigned I = 0;
+  auto GetCopyInfo = [&](uint16_t SrcEncoding, uint16_t DstEncoding)
+      -> std::tuple<RISCVII::VLMUL, const TargetRegisterClass &, unsigned,
+                    unsigned, unsigned> {
+    if (ReversedCopy) {
+      // For reversed copying, if there are enough aligned registers (8/4/2),
+      // we can do a larger copy (LMUL8/4/2).
+      // Besides, we already know that DstEncoding is larger than SrcEncoding
+      // in forwardCopyWillClobberTuple, so the difference between DstEncoding
+      // and SrcEncoding should be >= the LMUL value we try to use to avoid
+      // clobbering.
+      uint16_t Diff = DstEncoding - SrcEncoding;
+      if (I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
+          DstEncoding % 8 == 7)
+        return {RISCVII::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
+                RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
+      if (I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
+          DstEncoding % 4 == 3)
+        return {RISCVII::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
+                RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
+      if (I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
+          DstEncoding % 2 == 1)
+        return {RISCVII::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
+                RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
+      // Otherwise, we should do an LMUL1 copy.
+      return {RISCVII::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
+              RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
     }
-  }
 
-  if (NF == 1) {
-    auto MIB = BuildMI(MBB, MBBI, DL, get(Opc), DstReg);
-    if (UseVMV_V_V)
-      MIB.addReg(DstReg, RegState::Undef);
-    if (UseVMV_V_I)
-      MIB = MIB.add(DefMBBI->getOperand(2));
-    else
-      MIB = MIB.addReg(SrcReg, getKillRegState(KillSrc));
-    if (UseVMV_V_V) {
-      const MCInstrDesc &Desc = DefMBBI->getDesc();
-      MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc)));  // AVL
-      MIB.add(DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc))); // SEW
-      MIB.addImm(0);                                            // tu, mu
-      MIB.addReg(RISCV::VL, RegState::Implicit);
-      MIB.addReg(RISCV::VTYPE, RegState::Implicit);
+    // For forward copying, if the source and destination register encodings
+    // are aligned to 8/4/2, we can do an LMUL8/4/2 copy.
+    if (I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
+      return {RISCVII::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
+              RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
+    if (I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
+      return {RISCVII::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
+              RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
+    if (I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
+      return {RISCVII::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
+              RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
+    // Otherwise, we should do an LMUL1 copy.
+    return {RISCVII::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
+            RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
+  };
+  auto FindRegWithEncoding = [&TRI](const TargetRegisterClass &RegClass,
+                                    uint16_t Encoding) {
+    ArrayRef<MCPhysReg> Regs = RegClass.getRegisters();
+    const auto *FoundReg = llvm::find_if(Regs, [&](MCPhysReg Reg) {
+      return TRI->getEncodingValue(Reg) == Encoding;
+    });
+    // We should always be able to find one valid register.
+    assert(FoundReg != Regs.end());
+    return *FoundReg;
+  };
+  while (I != NumRegs) {
+    // For non-segment copying, we only do this once as the registers are
+    // always aligned.
+    // For segment copying, we may do this several times. If the registers are
+    // aligned to a larger LMUL, we can eliminate some copies.
+    auto [LMulCopied, RegClass, Opc, VVOpc, VIOpc] =
+        GetCopyInfo(SrcEncoding, DstEncoding);
+    auto [NumCopied, _] = RISCVVType::decodeVLMUL(LMulCopied);
+
+    MachineBasicBlock::const_iterator DefMBBI;
+    if (LMul == LMulCopied &&
+        isConvertibleToVMV_V_V(STI, MBB, MBBI, DefMBBI, LMul)) {
+      Opc = VVOpc;
+      if (DefMBBI->getOpcode() == VIOpc)
+        Opc = VIOpc;
     }
-    return;
-  }
 
-  int I = 0, End = NF, Incr = 1;
-  unsigned SrcEncoding = TRI->getEncodingValue(SrcReg);
-  unsigned DstEncoding = TRI->getEncodingValue(DstReg);
-  unsigned LMulVal;
-  bool Fractional;
-  std::tie(LMulVal, Fractional) = RISCVVType::decodeVLMUL(LMul);
-  assert(!Fractional && "It is impossible be fractional lmul here.");
-  if (forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NF * LMulVal)) {
-    I = NF - 1;
-    End = -1;
-    Incr = -1;
-  }
-
-  for (; I != End; I += Incr) {
-    auto MIB =
-        BuildMI(MBB, MBBI, DL, get(Opc), TRI->getSubReg(DstReg, SubRegIdx + I));
-    if (UseVMV_V_V)
-      MIB.addReg(TRI->getSubReg(DstReg, SubRegIdx + I), RegState::Undef);
+    // Emit the actual copy.
+    // For reversed copying, the encodings should be decreased.
+    MCRegister ActualSrcReg = FindRegWithEncoding(
+        RegClass, ReversedCopy ? (SrcEncoding - NumCopied + 1) : SrcEncoding);
+    MCRegister ActualDstReg = FindRegWithEncoding(
+        RegClass, ReversedCopy ? (DstEncoding - NumCopied + 1) : DstEncoding);
+
+    auto MIB = BuildMI(MBB, MBBI, DL, get(Opc), ActualDstReg);
+    bool UseVMV_V_I = RISCV::getRVVMCOpcode(Opc) == RISCV::VMV_V_I;
+    bool UseVMV = UseVMV_V_I || RISCV::getRVVMCOpcode(Opc) == RISCV::VMV_V_V;
+    if (UseVMV)
+      MIB.addReg(ActualDstReg, RegState::Undef);
     if (UseVMV_V_I)
       MIB = MIB.add(DefMBBI->getOperand(2));
     else
-      MIB = MIB.addReg(TRI->getSubReg(SrcReg, SubRegIdx + I),
-                       getKillRegState(KillSrc));
-    if (UseVMV_V_V) {
+      MIB = MIB.addReg(ActualSrcReg, getKillRegState(KillSrc));
+    if (UseVMV) {
       const MCInstrDesc &Desc = DefMBBI->getDesc();
       MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc)));  // AVL
       MIB.add(DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc))); // SEW
@@ -398,6 +411,11 @@ void RISCVInstrInfo::copyPhysRegVector(MachineBasicBlock &MBB,
       MIB.addReg(RISCV::VL, RegState::Implicit);
       MIB.addReg(RISCV::VTYPE, RegState::Implicit);
     }
+
+    // If we are copying in reverse, we should decrease the encodings.
+    SrcEncoding += (ReversedCopy ? -NumCopied : NumCopied);
+    DstEncoding += (ReversedCopy ? -NumCopied : NumCopied);
+    I += NumCopied;
   }
 }
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
index 4fa29e174602d0..5bb6ce250e8db7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
@@ -8,7 +8,6 @@ tracksRegLiveness: true
 body:             |
   bb.0:
     liveins: $x14, $x16
-    ; 82 = e32,m4
    ; CHECK-LABEL: name: copy_different_lmul
     ; CHECK: liveins: $x14, $x16
     ; CHECK-NEXT: {{  $}}
@@ -25,7 +24,6 @@ tracksRegLiveness: true
 body:             |
   bb.0:
     liveins: $x14, $x16
-    ; 82 = e32,m4
     ; CHECK-LABEL: name: copy_convert_to_vmv_v_v
     ; CHECK: liveins: $x14, $x16
     ; CHECK-NEXT: {{  $}}
@@ -42,7 +40,6 @@ tracksRegLiveness: true
 body:             |
   bb.0:
     liveins: $x14
-    ; 82 = e32,m4
     ; CHECK-LABEL: name: copy_convert_to_vmv_v_i
     ; CHECK: liveins: $x14
     ; CHECK-NEXT: {{  $}}
@@ -59,7 +56,6 @@ tracksRegLiveness: true
 body:             |
   bb.0:
     liveins: $x14, $x16
-    ; 82 = e32,m4
     ; CHECK-LABEL: name: copy_from_whole_load_store
     ; CHECK: liveins: $x14, $x16
     ; CHECK-NEXT: {{  $}}
@@ -76,7 +72,6 @@ tracksRegLiveness: true
 body:             |
   bb.0:
     liveins: $x14, $x16
-    ; 82 = e32,m4
     ; CHECK-LABEL: name: copy_with_vleff
     ; CHECK: liveins: $x14, $x16
     ; CHECK-NEXT: {{  $}}
@@ -95,8 +90,6 @@ tracksRegLiveness: true
 body:             |
   bb.0:
     liveins: $x14, $x16, $x17, $x18
-    ; 82 = e32,m4
-    ; 73 = e16,m2
     ; CHECK-LABEL: name: copy_with_vsetvl_x0_x0_1
     ; CHECK: liveins: $x14, $x16, $x17, $x18
     ; CHECK-NEXT: {{  $}}
@@ -121,8 +114,6 @@ tracksRegLiveness: true
 body:             |
   bb.0:
     liveins: $x14, $x16, $x17, $x18
-    ; 82 = e32,m4
-    ; 73 = e16,m2
     ; CHECK-LABEL: name: copy_with_vsetvl_x0_x0_2
     ; CHECK: liveins: $x14, $x16, $x17, $x18
     ; CHECK-NEXT: {{  $}}
@@ -147,8 +138,6 @@ tracksRegLiveness: true
 body:             |
   bb.0:
     liveins: $x14, $x16, $x17, $x18
-    ; 82 = e32,m4
-    ; 73 = e16,m2
     ; CHECK-LABEL: name: copy_with_vsetvl_x0_x0_3
     ; CHECK: liveins: $x14, $x16, $x17, $x18
     ; CHECK-NEXT: {{  $}}
@@ -169,7 +158,6 @@ tracksRegLiveness: true
 body:             |
   bb.0:
     liveins: $x16, $x17
-    ; 73 = e16,m2
     ; CHECK-LABEL: name: copy_subregister
     ; CHECK: liveins: $x16, $x17
     ; CHECK-NEXT: {{  $}}
@@ -191,8 +179,6 @@ tracksRegLiveness: true
 body:             |
   bb.0:
     liveins: $x14, $x16
-    ; 82 = e32,m4
-    ; 74 = e16,m4
    ; CHECK-LABEL: name: copy_with_different_vlmax
     ; CHECK: liveins: $x14, $x16
     ; CHECK-NEXT: {{  $}}
@@ -231,7 +217,6 @@ tracksRegLiveness: true
 body:             |
   bb.0:
     liveins: $x14, $x16
-    ; 80 = e32,m1
     ; CHECK-LABEL: name: copy_zvlsseg_reg
     ; CHECK: liveins: $x14, $x16
     ; CHECK-NEXT: {{  $}}
@@ -248,14 +233,12 @@ tracksRegLiveness: true
 body:             |
   bb.0:
     liveins: $x14, $x16
-    ; 80 = e32,m1
     ; CHECK-LABEL: name: copy_zvlsseg_reg_2
     ; CHECK: liveins: $x14, $x16
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 80 /* e32, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: $v8_v9 = PseudoVLSEG2E32_V_M1 undef $v8_v9, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
-    ; CHECK-NEXT: $v10 = PseudoVMV_V_V_M1 undef $v10, $v8, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
-    ; CHECK-NEXT: $v11 = PseudoVMV_V_V_M1 undef $v11, $v9, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: $v10m2 = VMV2R_V $v8m2
     $x15 = PseudoVSETVLI $x14, 80, implicit-def $vl, implicit-def $vtype
     $v8_v9 = PseudoVLSEG2E32_V_M1 undef $v8_v9, killed $x16, $noreg, 5, 0, implicit $vl, implicit $vtype
     $v10_v11 = COPY $v8_v9
@@ -266,7 +249,6 @@ tracksRegLiveness: true
 body:             |
   bb.0:
     liveins: $x14, $x16
-    ; 87 = e32,mf2
     ; CHECK-LABEL: name: copy_fractional_lmul
     ; CHECK: liveins: $x14, $x16
     ; CHECK-NEXT: {{  $}}
@@ -283,7 +265,6 @@ tracksRegLiveness: true
 body:             |
   bb.0:
     liveins: $x12, $x14, $x16
-    ; 80 = e32,m1
     ; CHECK-LABEL: name: copy_implicit_def
     ; CHECK: liveins: $x12, $x14, $x16
     ; CHECK-NEXT: {{  $}}
@@ -291,14 +272,7 @@ body:             |
     ; CHECK-NEXT: $v8_v9_v10_v11_v12_v13_v14_v15 = PseudoVLSEG8E32_V_M1 undef $v8_v9_v10_v11_v12_v13_v14_v15, killed $x12, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $x0 = PseudoVSETIVLI 10, 80 /* e32, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: $v15 = PseudoVLE32_V_M1 undef $v15, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype, implicit killed $v8_v9_v10_v11_v12_v13_v14_v15, implicit-def $v8_v9_v10_v11_v12_v13_v14_v15
-    ; CHECK-NEXT: $v24 = VMV1R_V killed $v8
-    ; CHECK-NEXT: $v25 = VMV1R_V killed $v9
-    ; CHECK-NEXT: $v26 = VMV1R_V killed $v10
-    ; CHECK-NEXT: $v27 = VMV1R_V killed $v11
-    ; CHECK-NEXT: $v28 = VMV1R_V killed $v12
-    ; CHECK-NEXT: $v29 = VMV1R_V killed $v13
-    ; CHECK-NEXT: $v30 = VMV1R_V killed $v14
-    ; CHECK-NEXT: $v31 = VMV1R_V killed $v15
+    ; CHECK-NEXT: $v24m8 = VMV8R_V killed $v8m8
     $x0 = PseudoVSETVLI $x14, 80, implicit-def $vl, implicit-def $vtype
     $v8_v9_v10_v11_v12_v13_v14_v15 = PseudoVLSEG8E32_V_M1 undef $v8_v9_v10_v11_v12_v13_v14_v15, killed $x12, $noreg, 5, 0, implicit $vl, implicit $vtype
     $x0 = PseudoVSETIVLI 10, 80, implicit-def $vl, implicit-def $vtype

diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir
index 85bb54471ed3c8..a44a93449332fe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir
@@ -7,30 +7,24 @@ name: copy_zvlsseg_N2
 body:             |
   bb.0:
     ; CHECK-LABEL: name: copy_zvlsseg_N2
-    ; CHECK: $v2 = VMV1R_V $v4
-    ; CHECK-NEXT: $v3 = VMV1R_V $v5
+    ; CHECK: $v2m2 = VMV2R_V $v4m2
     ; CHECK-NEXT: $v3 = VMV1R_V $v4
     ; CHECK-NEXT: $v4 = VMV1R_V $v5
     ; CHECK-NEXT: $v6 = VMV1R_V $v5
     ; CHECK-NEXT: $v5 = VMV1R_V $v4
-    ; CHECK-NEXT: $v6 = VMV1R_V $v4
-    ; CHECK-NEXT: $v7 = VMV1R_V $v5
-    ; CHECK-NEXT: $v0m2 = VMV2R_V $v4m2
-    ; CHECK-NEXT: $v2m2 = VMV2R_V $v6m2
+    ; CHECK-NEXT: $v6m2 = VMV2R_V $v4m2
+    ; CHECK-NEXT: $v0m4 = VMV4R_V $v4m4
     ; CHECK-NEXT: $v2m2 = VMV2R_V $v4m2
     ; CHECK-NEXT: $v4m2 = VMV2R_V $v6m2
     ; CHECK-NEXT: $v8m2 = VMV2R_V $v6m2
     ; CHECK-NEXT: $v6m2 = VMV2R_V $v4m2
-    ; CHECK-NEXT: $v8m2 = VMV2R_V $v4m2
-    ; CHECK-NEXT: $v10m2 = VMV2R_V $v6m2
-    ; CHECK-NEXT: $v0m4 = VMV4R_V $v8m4
-    ; CHECK-NEXT: $v4m4 = VMV4R_V $v12m4
+    ; CHECK-NEXT: $v8m4 = VMV4R_V $v4m4
+    ; CHECK-NEXT: $v0m8 = VMV8R_V $v8m8
     ; CHECK-NEXT: $v4m4 = VMV4R_V $v8m4
     ; CHECK-NEXT: $v8m4 = VMV4R_V $v12m4
     ; CHECK-NEXT: $v16m4 = VMV4R_V $v12m4
     ; CHECK-NEXT: $v12m4 = VMV4R_V $v8m4
-    ; CHECK-NEXT: $v16m4 = VMV4R_V $v8m4
-    ; CHECK-NEXT: $v20m4 = VMV4R_V $v12m4
+    ; CHECK-NEXT: $v16m8 = VMV8R_V $v8m8
     $v2_v3 = COPY $v4_v5
     $v3_v4 = COPY $v4_v5
     $v5_v6 = COPY $v4_v5
@@ -55,25 +49,20 @@ body:             |
     ; CHECK-NEXT: $v3 = VMV1R_V $v6
     ; CHECK-NEXT: $v4 = VMV1R_V $v7
     ; CHECK-NEXT: $v3 = VMV1R_V $v5
-    ; CHECK-NEXT: $v4 = VMV1R_V $v6
-    ; CHECK-NEXT: $v5 = VMV1R_V $v7
+    ; CHECK-NEXT: $v4m2 = VMV2R_V $v6m2
     ; CHECK-NEXT: $v4 = VMV1R_V $v5
     ; CHECK-NEXT: $v5 = VMV1R_V $v6
     ; CHECK-NEXT: $v6 = VMV1R_V $v7
-    ; CHECK-NEXT: $v9 = VMV1R_V $v7
-    ; CHECK-NEXT: $v8 = VMV1R_V $v6
+    ; CHECK-NEXT: $v8m2 = VMV2R_V $v6m2
     ; CHECK-NEXT: $v7 = VMV1R_V $v5
     ; CHECK-NEXT: $v9 = VMV1R_V $v5
-    ; CHECK-NEXT: $v10 = VMV1R_V $v6
-    ; CHECK-NEXT: $v11 = VMV1R_V $v7
+    ; CHECK-NEXT: $v10m2 = VMV2R_V $v6m2
     ; CHECK-NEXT: $v0m2 = VMV2R_V $v6m2
     ; CHECK-NEXT: $v2m2 = VMV2R_V $v8m2
     ; CHECK-NEXT: $v4m2 = VMV2R_V $v10m2
     ; CHECK-NEXT: $v2m2 = VMV2R_V $v6m2
-    ; CHECK-NEXT: $v4m2 = VMV2R_V $v8m2
-    ; CHECK-NEXT: $v6m2 = VMV2R_V $v10m2
-    ; CHECK-NEXT: $v14m2 = VMV2R_V $v10m2
-    ; CHECK-NEXT: $v12m2 = VMV2R_V $v8m2
+    ; CHECK-NEXT: $v4m4 = VMV4R_V $v8m4
+    ; CHECK-NEXT: $v12m4 = VMV4R_V $v8m4
     ; CHECK-NEXT: $v10m2 = VMV2R_V $v6m2
     ; CHECK-NEXT: $v12m2 = VMV2R_V $v6m2
     ; CHECK-NEXT: $v14m2 = VMV2R_V $v8m2
@@ -94,10 +83,8 @@ name: copy_zvlsseg_N4
 body:             |
   bb.0:
     ; CHECK-LABEL: name: copy_zvlsseg_N4
-    ; CHECK: $v6 = VMV1R_V $v10
-    ; CHECK-NEXT: $v7 = VMV1R_V $v11
-    ; CHECK-NEXT: $v8 = VMV1R_V $v12
-    ; CHECK-NEXT: $v9 = VMV1R_V $v13
+    ; CHECK: $v6m2 = VMV2R_V $v10m2
+    ; CHECK-NEXT: $v8m2 = VMV2R_V $v12m2
     ; CHECK-NEXT: $v7 = VMV1R_V $v10
     ; CHECK-NEXT: $v8 = VMV1R_V $v11
     ; CHECK-NEXT: $v9 = VMV1R_V $v12
@@ -106,13 +93,10 @@ body:             |
     ; CHECK-NEXT: $v15 = VMV1R_V $v12
     ; CHECK-NEXT: $v14 = VMV1R_V $v11
     ; CHECK-NEXT: $v13 = VMV1R_V $v10
-    ; CHECK-NEXT: $v14 = VMV1R_V $v10
-    ; CHECK-NEXT: $v15 = VMV1R_V $v11
-    ; CHECK-NEXT: $v16 = VMV1R_V $v12
-    ; CHECK-NEXT: $v17 = VMV1R_V $v13
+    ; CHECK-NEXT: $v14m2 = VMV2R_V $v10m2
+    ; CHECK-NEXT: $v16m2 = VMV2R_V $v12m2
     ; CHECK-NEXT: $v2m2 = VMV2R_V $v10m2
-    ; CHECK-NEXT: $v4m2 = VMV2R_V $v12m2
-    ; CHECK-NEXT: $v6m2 = VMV2R_V $v14m2
+    ; CHECK-NEXT: $v4m4 = VMV4R_V $v12m4
     ; CHECK-NEXT: $v8m2 = VMV2R_V $v16m2
     ; CHECK-NEXT: $v4m2 = VMV2R_V $v10m2
     ; CHECK-NEXT: $v6m2 = VMV2R_V $v12m2
@@ -123,8 +107,7 @@ body:             |
     ; CHECK-NEXT: $v18m2 = VMV2R_V $v12m2
     ; CHECK-NEXT: $v16m2 = VMV2R_V $v10m2
     ; CHECK-NEXT: $v18m2 = VMV2R_V $v10m2
-    ; CHECK-NEXT: $v20m2 = VMV2R_V $v12m2
-    ; CHECK-NEXT: $v22m2 = VMV2R_V $v14m2
+    ; CHECK-NEXT: $v20m4 = VMV4R_V $v12m4
     ; CHECK-NEXT: $v24m2 = VMV2R_V $v16m2
     $v6_v7_v8_v9 = COPY $v10_v11_v12_v13
     $v7_v8_v9_v10 = COPY $v10_v11_v12_v13
@@ -146,57 +129,59 @@ body:             |
     ; CHECK-NEXT: $v7 = VMV1R_V $v12
     ; CHECK-NEXT: $v8 = VMV1R_V $v13
     ; CHECK-NEXT: $v9 = VMV1R_V $v14
-    ; CHECK-NEXT: $v6 = VMV1R_V $v10
-    ; CHECK-NEXT: $v7 = VMV1R_V $v11
-    ; CHECK-NEXT: $v8 = VMV1R_V $v12
-    ; CHECK-NEXT: $v9 = VMV1R_V $v13
+    ; CHECK-NEXT: $v6m2 = VMV2R_V $v10m2
+    ; CHECK-NEXT: $v8m2 = VMV2R_V $v12m2
     ; CHECK-NEXT: $v10 = VMV1R_V $v14
     ; CHECK-NEXT: $v18 = VMV1R_V $v14
-    ; CHECK-NEXT: $v17 = VMV1R_V $v13
-    ; CHECK-NEXT: $v16 = VMV1R_V $v12
-    ; CHECK-NEXT: $v15 = VMV1R_V $v11
-    ; CHECK-NEXT: $v14 = VMV1R_V $v10
+    ; CHECK-NEXT: $v16m2 = VMV2R_V $v12m2
+    ; CHECK-NEXT: $v14m2 = VMV2R_V $v10m2
     ; CHECK-NEXT: $v15 = VMV1R_V $v10
     ; CHECK-NEXT: $v16 = VMV1R_V $v11
     ; CHECK-NEXT: $v17 = VMV1R_V $v12
     ; CHECK-NEXT: $v18 = VMV1R_V $v13
     ; CHECK-NEXT: $v19 = VMV1R_V $v14
+    ; CHECK-NEXT: $v7 = VMV1R_V $v11
+    ; CHECK-NEXT: $v8m4 = VMV4R_V $v12m4
+    ; CHECK-NEXT: $v16m4 = VMV4R_V $v12m4
+    ; CHECK-NEXT: $v15 = VMV1R_V $v11
     $v5_v6_v7_v8_v9 = COPY $v10_v11_v12_v13_v14
     $v6_v7_v8_v9_v10 = COPY $v10_v11_v12_v13_v14
     $v14_v15_v16_v17_v18 = COPY $v10_v11_v12_v13_v14
     $v15_v16_v17_v18_v19 = COPY $v10_v11_v12_v13_v14
+    $v7_v8_v9_v10_v11 = COPY $v11_v12_v13_v14_v15
+    $v15_v16_v17_v18_v19 = COPY $v11_v12_v13_v14_v15
 ...
 ---
 name: copy_zvlsseg_N6
 body:             |
   bb.0:
     ; CHECK-LABEL: name: copy_zvlsseg_N6
-    ; CHECK: $v4 = VMV1R_V $v10
-    ; CHECK-NEXT: $v5 = VMV1R_V $v11
-    ; CHECK-NEXT: $v6 = VMV1R_V $v12
-    ; CHECK-NEXT: $v7 = VMV1R_V $v13
-    ; CHECK-NEXT: $v8 = VMV1R_V $v14
-    ; CHECK-NEXT: $v9 = VMV1R_V $v15
+    ; CHECK: $v4m2 = VMV2R_V $v10m2
+    ; CHECK-NEXT: $v6m2 = VMV2R_V $v12m2
+    ; CHECK-NEXT: $v8m2 = VMV2R_V $v14m2
     ; CHECK-NEXT: $v5 = VMV1R_V $v10
     ; CHECK-NEXT: $v6 = VMV1R_V $v11
     ; CHECK-NEXT: $v7 = VMV1R_V $v12
     ; CHECK-NEXT: $v8 = VMV1R_V $v13
     ; CHECK-NEXT: $v9 = VMV1R_V $v14
     ; CHECK-NEXT: $v10 = VMV1R_V $v15
+    ; CHECK-NEXT: $v6m2 = VMV2R_V $v10m2
+    ; CHECK-NEXT: $v8m4 = VMV4R_V $v12m4
+    ; CHECK-NEXT: $v16m4 = VMV4R_V $v12m4
+    ; CHECK-NEXT: $v14m2 = VMV2R_V $v10m2
     ; CHECK-NEXT: $v20 = VMV1R_V $v15
     ; CHECK-NEXT: $v19 = VMV1R_V $v14
     ; CHECK-NEXT: $v18 = VMV1R_V $v13
     ; CHECK-NEXT: $v17 = VMV1R_V $v12
     ; CHECK-NEXT: $v16 = VMV1R_V $v11
     ; CHECK-NEXT: $v15 = VMV1R_V $v10
-    ; CHECK-NEXT: $v16 = VMV1R_V $v10
-    ; CHECK-NEXT: $v17 = VMV1R_V $v11
-    ; CHECK-NEXT: $v18 = VMV1R_V $v12
-    ; CHECK-NEXT: $v19 = VMV1R_V $v13
-    ; CHECK-NEXT: $v20 = VMV1R_V $v14
-    ; CHECK-NEXT: $v21 = VMV1R_V $v15
+    ; CHECK-NEXT: $v16m2 = VMV2R_V $v10m2
+    ; CHECK-NEXT: $v18m2 = VMV2R_V $v12m2
+    ; CHECK-NEXT: $v20m2 = VMV2R_V $v14m2
     $v4_v5_v6_v7_v8_v9 = COPY $v10_v11_v12_v13_v14_v15
     $v5_v6_v7_v8_v9_v10 = COPY $v10_v11_v12_v13_v14_v15
+    $v6_v7_v8_v9_v10_v11 = COPY $v10_v11_v12_v13_v14_v15
+    $v14_v15_v16_v17_v18_v19 = COPY $v10_v11_v12_v13_v14_v15
     $v15_v16_v17_v18_v19_v20 = COPY $v10_v11_v12_v13_v14_v15
     $v16_v17_v18_v19_v20_v21 = COPY $v10_v11_v12_v13_v14_v15
 ...
@@ -212,20 +197,17 @@ body:             |
     ; CHECK-NEXT: $v7 = VMV1R_V $v14
     ; CHECK-NEXT: $v8 = VMV1R_V $v15
     ; CHECK-NEXT: $v9 = VMV1R_V $v16
-    ; CHECK-NEXT: $v4 = VMV1R_V $v10
-    ; CHECK-NEXT: $v5 = VMV1R_V $v11
-    ; CHECK-NEXT: $v6 = VMV1R_V $v12
-    ; CHECK-NEXT: $v7 = VMV1R_V $v13
-    ; CHECK-NEXT: $v8 = VMV1R_V $v14
-    ; CHECK-NEXT: $v9 = VMV1R_V $v15
+    ; CHECK-NEXT: $v4m2 = VMV2R_V $v10m2
+    ; CHECK-NEXT: $v6m2 = VMV2R_V $v12m2
+    ; CHECK-NEXT: $v8m2 = VMV2R_V $v14m2
     ; CHECK-NEXT: $v10 = VMV1R_V $v16
+    ; CHECK-NEXT: $v20 = VMV1R_V $v16
+    ; CHECK-NEXT: $v16m4 = VMV4R_V $v12m4
+    ; CHECK-NEXT: $v14m2 = VMV2R_V $v10m2
     ; CHECK-NEXT: $v22 = VMV1R_V $v16
-    ; CHECK-NEXT: $v21 = VMV1R_V $v15
-    ; CHECK-NEXT: $v20 = VMV1R_V $v14
-    ; CHECK-NEXT: $v19 = VMV1R_V $v13
-    ; CHECK-NEXT: $v18 = VMV1R_V $v12
-    ; CHECK-NEXT: $v17 = VMV1R_V $v11
-    ; CHECK-NEXT: $v16 = VMV1R_V $v10
+    ; CHECK-NEXT: $v20m2 = VMV2R_V $v14m2
+    ; CHECK-NEXT: $v18m2 = VMV2R_V $v12m2
+    ; CHECK-NEXT: $v16m2 = VMV2R_V $v10m2
     ; CHECK-NEXT: $v17 = VMV1R_V $v10
     ; CHECK-NEXT: $v18 = VMV1R_V $v11
     ; CHECK-NEXT: $v19 = VMV1R_V $v12
@@ -233,24 +215,28 @@ body:             |
     ; CHECK-NEXT: $v21 = VMV1R_V $v14
     ; CHECK-NEXT: $v22 = VMV1R_V $v15
     ; CHECK-NEXT: $v23 = VMV1R_V $v16
+    ; CHECK-NEXT: $v22 = VMV1R_V $v21
+    ; CHECK-NEXT: $v21 = VMV1R_V $v20
+    ; CHECK-NEXT: $v20 = VMV1R_V $v19
+    ; CHECK-NEXT: $v19 = VMV1R_V $v18
+    ; CHECK-NEXT: $v18 = VMV1R_V $v17
+    ; CHECK-NEXT: $v17 = VMV1R_V $v16
+    ; CHECK-NEXT: $v16 = VMV1R_V $v15
     $v3_v4_v5_v6_v7_v8_v9 = COPY $v10_v11_v12_v13_v14_v15_v16
     $v4_v5_v6_v7_v8_v9_v10 = COPY $v10_v11_v12_v13_v14_v15_v16
+    $v14_v15_v16_v17_v18_v19_v20 = COPY $v10_v11_v12_v13_v14_v15_v16
     $v16_v17_v18_v19_v20_v21_v22 = COPY $v10_v11_v12_v13_v14_v15_v16
     $v17_v18_v19_v20_v21_v22_v23 = COPY $v10_v11_v12_v13_v14_v15_v16
+    $v16_v17_v18_v19_v20_v21_v22 = COPY $v15_v16_v17_v18_v19_v20_v21
 ...
 ---
 name: copy_zvlsseg_N8
 body:             |
   bb.0:
     ; CHECK-LABEL: name: copy_zvlsseg_N8
-    ; CHECK: $v2 = VMV1R_V $v10
-    ; CHECK-NEXT: $v3 = VMV1R_V $v11
-    ; CHECK-NEXT: $v4 = VMV1R_V $v12
-    ; CHECK-NEXT: $v5 = VMV1R_V $v13
-    ; CHECK-NEXT: $v6 = VMV1R_V $v14
-    ; CHECK-NEXT: $v7 = VMV1R_V $v15
-    ; CHECK-NEXT: $v8 = VMV1R_V $v16
-    ; CHECK-NEXT: $v9 = VMV1R_V $v17
+    ; CHECK: $v2m2 = VMV2R_V $v10m2
+    ; CHECK-NEXT: $v4m4 = VMV4R_V $v12m4
+    ; CHECK-NEXT: $v8m2 = VMV2R_V $v16m2
     ; CHECK-NEXT: $v3 = VMV1R_V $v10
     ; CHECK-NEXT: $v4 = VMV1R_V $v11
     ; CHECK-NEXT: $v5 = VMV1R_V $v12
@@ -267,16 +253,15 @@ body:             |
     ; CHECK-NEXT: $v19 = VMV1R_V $v12
     ; CHECK-NEXT: $v18 = VMV1R_V $v11
     ; CHECK-NEXT: $v17 = VMV1R_V $v10
-    ; CHECK-NEXT: $v18 = VMV1R_V $v10
-    ; CHECK-NEXT: $v19 = VMV1R_V $v11
-    ; CHECK-NEXT: $v20 = VMV1R_V $v12
-    ; CHECK-NEXT: $v21 = VMV1R_V $v13
-    ; CHECK-NEXT: $v22 = VMV1R_V $v14
-    ; CHECK-NEXT: $v23 = VMV1R_V $v15
-    ; CHECK-NEXT: $v24 = VMV1R_V $v16
-    ; CHECK-NEXT: $v25 = VMV1R_V $v17
+    ; CHECK-NEXT: $v18m2 = VMV2R_V $v10m2
+    ; CHECK-NEXT: $v20m4 = VMV4R_V $v12m4
+    ; CHECK-NEXT: $v24m2 = VMV2R_V $v16m2
+    ; CHECK-NEXT: $v8m8 = VMV8R_V $v0m8
+    ; CHECK-NEXT: $v0m8 = VMV8R_V $v8m8
     $v2_v3_v4_v5_v6_v7_v8_v9 = COPY $v10_v11_v12_v13_v14_v15_v16_v17
     $v3_v4_v5_v6_v7_v8_v9_v10 = COPY $v10_v11_v12_v13_v14_v15_v16_v17
     $v17_v18_v19_v20_v21_v22_v23_v24 = COPY $v10_v11_v12_v13_v14_v15_v16_v17
     $v18_v19_v20_v21_v22_v23_v24_v25 = COPY $v10_v11_v12_v13_v14_v15_v16_v17
+    $v8_v9_v10_v11_v12_v13_v14_v15 = COPY $v0_v1_v2_v3_v4_v5_v6_v7
+    $v0_v1_v2_v3_v4_v5_v6_v7 = COPY $v8_v9_v10_v11_v12_v13_v14_v15
 ...


        

