[llvm-branch-commits] [llvm] [RISCV] Use larger copies when register tuples are aligned (PR #84455)
Pengcheng Wang via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Sun Apr 7 22:23:29 PDT 2024
https://github.com/wangpc-pp updated https://github.com/llvm/llvm-project/pull/84455
>From 35d0ea085b43a67c092e6263e6ec9d34e66e1453 Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Tue, 12 Mar 2024 17:31:47 +0800
Subject: [PATCH 01/10] Reduce copies
Created using spr 1.3.4
---
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 89 +++++-----
llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir | 30 +---
llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir | 175 +++++++------------
3 files changed, 106 insertions(+), 188 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 7895e87702c711..9fe5666d6a81f4 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -302,58 +302,38 @@ void RISCVInstrInfo::copyPhysRegVector(MachineBasicBlock &MBB,
RISCVII::VLMUL LMul, unsigned NF) const {
const TargetRegisterInfo *TRI = STI.getRegisterInfo();
- int I = 0, End = NF, Incr = 1;
unsigned SrcEncoding = TRI->getEncodingValue(SrcReg);
unsigned DstEncoding = TRI->getEncodingValue(DstReg);
unsigned LMulVal;
bool Fractional;
std::tie(LMulVal, Fractional) = RISCVVType::decodeVLMUL(LMul);
assert(!Fractional && "It is impossible be fractional lmul here.");
- if (forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NF * LMulVal)) {
- I = NF - 1;
- End = -1;
- Incr = -1;
- }
+ unsigned NumRegs = NF * LMulVal;
+ bool ReversedCopy =
+ forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NumRegs);
- for (; I != End; I += Incr) {
+ unsigned I = 0;
+ while (I != NumRegs) {
auto GetCopyInfo =
- [](RISCVII::VLMUL LMul,unsigned NF) -> std::tuple<unsigned, unsigned, unsigned, unsigned> {
- unsigned Opc;
- unsigned SubRegIdx;
- unsigned VVOpc, VIOpc;
- switch (LMul) {
- default:
- llvm_unreachable("Impossible LMUL for vector register copy.");
- case RISCVII::LMUL_1:
- Opc = RISCV::VMV1R_V;
- SubRegIdx = RISCV::sub_vrm1_0;
- VVOpc = RISCV::PseudoVMV_V_V_M1;
- VIOpc = RISCV::PseudoVMV_V_I_M1;
- break;
- case RISCVII::LMUL_2:
- Opc = RISCV::VMV2R_V;
- SubRegIdx = RISCV::sub_vrm2_0;
- VVOpc = RISCV::PseudoVMV_V_V_M2;
- VIOpc = RISCV::PseudoVMV_V_I_M2;
- break;
- case RISCVII::LMUL_4:
- Opc = RISCV::VMV4R_V;
- SubRegIdx = RISCV::sub_vrm4_0;
- VVOpc = RISCV::PseudoVMV_V_V_M4;
- VIOpc = RISCV::PseudoVMV_V_I_M4;
- break;
- case RISCVII::LMUL_8:
- assert(NF == 1);
- Opc = RISCV::VMV8R_V;
- SubRegIdx = RISCV::sub_vrm1_0; // There is no sub_vrm8_0.
- VVOpc = RISCV::PseudoVMV_V_V_M8;
- VIOpc = RISCV::PseudoVMV_V_I_M8;
- break;
- }
- return {SubRegIdx, Opc, VVOpc, VIOpc};
+ [&](unsigned SrcReg,
+ unsigned DstReg) -> std::tuple<int, const TargetRegisterClass &,
+ unsigned, unsigned, unsigned> {
+ unsigned SrcEncoding = TRI->getEncodingValue(SrcReg);
+ unsigned DstEncoding = TRI->getEncodingValue(DstReg);
+ if (!(SrcEncoding & 0b111) && !(DstEncoding & 0b111) && I + 8 <= NumRegs)
+ return {8, RISCV::VRM8RegClass, RISCV::VMV8R_V, RISCV::PseudoVMV_V_V_M8,
+ RISCV::PseudoVMV_V_I_M8};
+ if (!(SrcEncoding & 0b11) && !(DstEncoding & 0b11) && I + 4 <= NumRegs)
+ return {4, RISCV::VRM4RegClass, RISCV::VMV4R_V, RISCV::PseudoVMV_V_V_M4,
+ RISCV::PseudoVMV_V_I_M4};
+ if (!(SrcEncoding & 0b1) && !(DstEncoding & 0b1) && I + 2 <= NumRegs)
+ return {2, RISCV::VRM2RegClass, RISCV::VMV2R_V, RISCV::PseudoVMV_V_V_M2,
+ RISCV::PseudoVMV_V_I_M2};
+ return {1, RISCV::VRRegClass, RISCV::VMV1R_V, RISCV::PseudoVMV_V_V_M1,
+ RISCV::PseudoVMV_V_I_M1};
};
- auto [SubRegIdx, Opc, VVOpc, VIOpc] = GetCopyInfo(LMul, NF);
+ auto [NumCopied, RegClass, Opc, VVOpc, VIOpc] = GetCopyInfo(SrcReg, DstReg);
MachineBasicBlock::const_iterator DefMBBI;
if (isConvertibleToVMV_V_V(STI, MBB, MBBI, DefMBBI, LMul)) {
@@ -364,6 +344,20 @@ void RISCVInstrInfo::copyPhysRegVector(MachineBasicBlock &MBB,
}
}
+ for (MCPhysReg Reg : RegClass.getRegisters()) {
+ if (TRI->getEncodingValue(Reg) == TRI->getEncodingValue(SrcReg)) {
+ SrcReg = Reg;
+ break;
+ }
+ }
+
+ for (MCPhysReg Reg : RegClass.getRegisters()) {
+ if (TRI->getEncodingValue(Reg) == TRI->getEncodingValue(DstReg)) {
+ DstReg = Reg;
+ break;
+ }
+ }
+
auto EmitCopy = [&](MCRegister SrcReg, MCRegister DstReg, unsigned Opcode) {
auto MIB = BuildMI(MBB, MBBI, DL, get(Opcode), DstReg);
bool UseVMV_V_I = RISCV::getRVVMCOpcode(Opcode) == RISCV::VMV_V_I;
@@ -385,13 +379,10 @@ void RISCVInstrInfo::copyPhysRegVector(MachineBasicBlock &MBB,
}
};
- if (NF == 1) {
- EmitCopy(SrcReg, DstReg, Opc);
- return;
- }
-
- EmitCopy(TRI->getSubReg(SrcReg, SubRegIdx + I),
- TRI->getSubReg(DstReg, SubRegIdx + I), Opc);
+ EmitCopy(SrcReg, DstReg, Opc);
+ SrcReg = SrcReg.id() + (ReversedCopy ? -NumCopied : NumCopied);
+ DstReg = DstReg.id() + (ReversedCopy ? -NumCopied : NumCopied);
+ I += NumCopied;
}
}
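
To make the new selection logic concrete, here is a minimal standalone sketch of the alignment check (plain integers stand in for TRI encodings; pickCopyWidth is a made-up name for illustration, not a function in the patch). It reproduces the copy plan the updated zvlsseg test below expects for an 8-register tuple copy from v10 to v2: widths 2, 4, 2.

#include <cstdio>

// Sketch of the alignment check in GetCopyInfo: pick the largest
// whole-register move (8/4/2/1 registers) such that both encodings are
// aligned to that width and enough registers remain to be copied.
static unsigned pickCopyWidth(unsigned SrcEncoding, unsigned DstEncoding,
                              unsigned I, unsigned NumRegs) {
  if (!(SrcEncoding & 0b111) && !(DstEncoding & 0b111) && I + 8 <= NumRegs)
    return 8; // both multiples of 8 -> VMV8R_V
  if (!(SrcEncoding & 0b11) && !(DstEncoding & 0b11) && I + 4 <= NumRegs)
    return 4; // both multiples of 4 -> VMV4R_V
  if (!(SrcEncoding & 0b1) && !(DstEncoding & 0b1) && I + 2 <= NumRegs)
    return 2; // both multiples of 2 -> VMV2R_V
  return 1;   // fall back to VMV1R_V
}

int main() {
  // $v2_..._v9 = COPY $v10_..._v17 (NF=8, LMUL=1): prints widths 2, 4, 2,
  // matching "$v2m2 = VMV2R_V $v10m2; $v4m4 = VMV4R_V $v12m4;
  // $v8m2 = VMV2R_V $v16m2" in the copy_zvlsseg_N8 test below.
  for (unsigned Src = 10, Dst = 2, I = 0; I != 8;) {
    unsigned W = pickCopyWidth(Src, Dst, I, 8);
    std::printf("copy %u register(s): v%u <- v%u\n", W, Dst, Src);
    Src += W; Dst += W; I += W;
  }
}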
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
index 4fa29e174602d0..dd569129db4d72 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
@@ -8,7 +8,6 @@ tracksRegLiveness: true
body: |
bb.0:
liveins: $x14, $x16
- ; 82 = e32,m4
; CHECK-LABEL: name: copy_different_lmul
; CHECK: liveins: $x14, $x16
; CHECK-NEXT: {{ $}}
@@ -25,7 +24,6 @@ tracksRegLiveness: true
body: |
bb.0:
liveins: $x14, $x16
- ; 82 = e32,m4
; CHECK-LABEL: name: copy_convert_to_vmv_v_v
; CHECK: liveins: $x14, $x16
; CHECK-NEXT: {{ $}}
@@ -42,7 +40,6 @@ tracksRegLiveness: true
body: |
bb.0:
liveins: $x14
- ; 82 = e32,m4
; CHECK-LABEL: name: copy_convert_to_vmv_v_i
; CHECK: liveins: $x14
; CHECK-NEXT: {{ $}}
@@ -59,7 +56,6 @@ tracksRegLiveness: true
body: |
bb.0:
liveins: $x14, $x16
- ; 82 = e32,m4
; CHECK-LABEL: name: copy_from_whole_load_store
; CHECK: liveins: $x14, $x16
; CHECK-NEXT: {{ $}}
@@ -76,7 +72,6 @@ tracksRegLiveness: true
body: |
bb.0:
liveins: $x14, $x16
- ; 82 = e32,m4
; CHECK-LABEL: name: copy_with_vleff
; CHECK: liveins: $x14, $x16
; CHECK-NEXT: {{ $}}
@@ -95,8 +90,6 @@ tracksRegLiveness: true
body: |
bb.0:
liveins: $x14, $x16, $x17, $x18
- ; 82 = e32,m4
- ; 73 = e16,m2
; CHECK-LABEL: name: copy_with_vsetvl_x0_x0_1
; CHECK: liveins: $x14, $x16, $x17, $x18
; CHECK-NEXT: {{ $}}
@@ -121,8 +114,6 @@ tracksRegLiveness: true
body: |
bb.0:
liveins: $x14, $x16, $x17, $x18
- ; 82 = e32,m4
- ; 73 = e16,m2
; CHECK-LABEL: name: copy_with_vsetvl_x0_x0_2
; CHECK: liveins: $x14, $x16, $x17, $x18
; CHECK-NEXT: {{ $}}
@@ -147,8 +138,6 @@ tracksRegLiveness: true
body: |
bb.0:
liveins: $x14, $x16, $x17, $x18
- ; 82 = e32,m4
- ; 73 = e16,m2
; CHECK-LABEL: name: copy_with_vsetvl_x0_x0_3
; CHECK: liveins: $x14, $x16, $x17, $x18
; CHECK-NEXT: {{ $}}
@@ -169,7 +158,6 @@ tracksRegLiveness: true
body: |
bb.0:
liveins: $x16, $x17
- ; 73 = e16,m2
; CHECK-LABEL: name: copy_subregister
; CHECK: liveins: $x16, $x17
; CHECK-NEXT: {{ $}}
@@ -191,8 +179,6 @@ tracksRegLiveness: true
body: |
bb.0:
liveins: $x14, $x16
- ; 82 = e32,m4
- ; 74 = e16,m4
; CHECK-LABEL: name: copy_with_different_vlmax
; CHECK: liveins: $x14, $x16
; CHECK-NEXT: {{ $}}
@@ -231,7 +217,6 @@ tracksRegLiveness: true
body: |
bb.0:
liveins: $x14, $x16
- ; 80 = e32,m1
; CHECK-LABEL: name: copy_zvlsseg_reg
; CHECK: liveins: $x14, $x16
; CHECK-NEXT: {{ $}}
@@ -248,14 +233,12 @@ tracksRegLiveness: true
body: |
bb.0:
liveins: $x14, $x16
- ; 80 = e32,m1
; CHECK-LABEL: name: copy_zvlsseg_reg_2
; CHECK: liveins: $x14, $x16
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 80 /* e32, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: $v8_v9 = PseudoVLSEG2E32_V_M1 undef $v8_v9, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
- ; CHECK-NEXT: $v10 = PseudoVMV_V_V_M1 undef $v10, $v8, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
- ; CHECK-NEXT: $v11 = PseudoVMV_V_V_M1 undef $v11, $v9, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: $v10m2 = PseudoVMV_V_V_M2 undef $v10m2, $v8m2, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
$x15 = PseudoVSETVLI $x14, 80, implicit-def $vl, implicit-def $vtype
$v8_v9 = PseudoVLSEG2E32_V_M1 undef $v8_v9, killed $x16, $noreg, 5, 0, implicit $vl, implicit $vtype
$v10_v11 = COPY $v8_v9
@@ -266,7 +249,6 @@ tracksRegLiveness: true
body: |
bb.0:
liveins: $x14, $x16
- ; 87 = e32,mf2
; CHECK-LABEL: name: copy_fractional_lmul
; CHECK: liveins: $x14, $x16
; CHECK-NEXT: {{ $}}
@@ -283,7 +265,6 @@ tracksRegLiveness: true
body: |
bb.0:
liveins: $x12, $x14, $x16
- ; 80 = e32,m1
; CHECK-LABEL: name: copy_implicit_def
; CHECK: liveins: $x12, $x14, $x16
; CHECK-NEXT: {{ $}}
@@ -291,14 +272,7 @@ body: |
; CHECK-NEXT: $v8_v9_v10_v11_v12_v13_v14_v15 = PseudoVLSEG8E32_V_M1 undef $v8_v9_v10_v11_v12_v13_v14_v15, killed $x12, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: $x0 = PseudoVSETIVLI 10, 80 /* e32, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: $v15 = PseudoVLE32_V_M1 undef $v15, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype, implicit killed $v8_v9_v10_v11_v12_v13_v14_v15, implicit-def $v8_v9_v10_v11_v12_v13_v14_v15
- ; CHECK-NEXT: $v24 = VMV1R_V killed $v8
- ; CHECK-NEXT: $v25 = VMV1R_V killed $v9
- ; CHECK-NEXT: $v26 = VMV1R_V killed $v10
- ; CHECK-NEXT: $v27 = VMV1R_V killed $v11
- ; CHECK-NEXT: $v28 = VMV1R_V killed $v12
- ; CHECK-NEXT: $v29 = VMV1R_V killed $v13
- ; CHECK-NEXT: $v30 = VMV1R_V killed $v14
- ; CHECK-NEXT: $v31 = VMV1R_V killed $v15
+ ; CHECK-NEXT: $v24m8 = VMV8R_V killed $v8m8
$x0 = PseudoVSETVLI $x14, 80, implicit-def $vl, implicit-def $vtype
$v8_v9_v10_v11_v12_v13_v14_v15 = PseudoVLSEG8E32_V_M1 undef $v8_v9_v10_v11_v12_v13_v14_v15, killed $x12, $noreg, 5, 0, implicit $vl, implicit $vtype
$x0 = PseudoVSETIVLI 10, 80, implicit-def $vl, implicit-def $vtype
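
A quick arithmetic check of that last vmv-copy.mir change: $v8 has encoding 8 and $v24 has encoding 24, both multiples of 8, and NF * LMulVal = 8 here, so the first alignment test in GetCopyInfo fires on the very first iteration and the eight VMV1R_V moves collapse into a single VMV8R_V.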
diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir
index 85bb54471ed3c8..afd8b27a3b90b9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir
@@ -7,30 +7,24 @@ name: copy_zvlsseg_N2
body: |
bb.0:
; CHECK-LABEL: name: copy_zvlsseg_N2
- ; CHECK: $v2 = VMV1R_V $v4
- ; CHECK-NEXT: $v3 = VMV1R_V $v5
+ ; CHECK: $v2m2 = VMV2R_V $v4m2
; CHECK-NEXT: $v3 = VMV1R_V $v4
; CHECK-NEXT: $v4 = VMV1R_V $v5
- ; CHECK-NEXT: $v6 = VMV1R_V $v5
; CHECK-NEXT: $v5 = VMV1R_V $v4
- ; CHECK-NEXT: $v6 = VMV1R_V $v4
- ; CHECK-NEXT: $v7 = VMV1R_V $v5
- ; CHECK-NEXT: $v0m2 = VMV2R_V $v4m2
- ; CHECK-NEXT: $v2m2 = VMV2R_V $v6m2
+ ; CHECK-NEXT: $v4 = VMV1R_V $v3
+ ; CHECK-NEXT: $v6m2 = VMV2R_V $v4m2
+ ; CHECK-NEXT: $v0m4 = VMV4R_V $v4m4
; CHECK-NEXT: $v2m2 = VMV2R_V $v4m2
; CHECK-NEXT: $v4m2 = VMV2R_V $v6m2
- ; CHECK-NEXT: $v8m2 = VMV2R_V $v6m2
; CHECK-NEXT: $v6m2 = VMV2R_V $v4m2
- ; CHECK-NEXT: $v8m2 = VMV2R_V $v4m2
- ; CHECK-NEXT: $v10m2 = VMV2R_V $v6m2
- ; CHECK-NEXT: $v0m4 = VMV4R_V $v8m4
- ; CHECK-NEXT: $v4m4 = VMV4R_V $v12m4
+ ; CHECK-NEXT: $v4m2 = VMV2R_V $v0m2
+ ; CHECK-NEXT: $v8m4 = VMV4R_V $v4m4
+ ; CHECK-NEXT: $v0m8 = VMV8R_V $v8m8
; CHECK-NEXT: $v4m4 = VMV4R_V $v8m4
; CHECK-NEXT: $v8m4 = VMV4R_V $v12m4
- ; CHECK-NEXT: $v16m4 = VMV4R_V $v12m4
; CHECK-NEXT: $v12m4 = VMV4R_V $v8m4
- ; CHECK-NEXT: $v16m4 = VMV4R_V $v8m4
- ; CHECK-NEXT: $v20m4 = VMV4R_V $v12m4
+ ; CHECK-NEXT: $v8m4 = VMV4R_V $v4m4
+ ; CHECK-NEXT: $v16m8 = VMV8R_V $v8m8
$v2_v3 = COPY $v4_v5
$v3_v4 = COPY $v4_v5
$v5_v6 = COPY $v4_v5
@@ -55,29 +49,23 @@ body: |
; CHECK-NEXT: $v3 = VMV1R_V $v6
; CHECK-NEXT: $v4 = VMV1R_V $v7
; CHECK-NEXT: $v3 = VMV1R_V $v5
- ; CHECK-NEXT: $v4 = VMV1R_V $v6
- ; CHECK-NEXT: $v5 = VMV1R_V $v7
+ ; CHECK-NEXT: $v4m2 = VMV2R_V $v6m2
; CHECK-NEXT: $v4 = VMV1R_V $v5
; CHECK-NEXT: $v5 = VMV1R_V $v6
; CHECK-NEXT: $v6 = VMV1R_V $v7
- ; CHECK-NEXT: $v9 = VMV1R_V $v7
- ; CHECK-NEXT: $v8 = VMV1R_V $v6
; CHECK-NEXT: $v7 = VMV1R_V $v5
+ ; CHECK-NEXT: $v6m2 = VMV2R_V $v4m2
; CHECK-NEXT: $v9 = VMV1R_V $v5
- ; CHECK-NEXT: $v10 = VMV1R_V $v6
- ; CHECK-NEXT: $v11 = VMV1R_V $v7
+ ; CHECK-NEXT: $v10m2 = VMV2R_V $v6m2
; CHECK-NEXT: $v0m2 = VMV2R_V $v6m2
- ; CHECK-NEXT: $v2m2 = VMV2R_V $v8m2
- ; CHECK-NEXT: $v4m2 = VMV2R_V $v10m2
+ ; CHECK-NEXT: $v0m4 = VMV4R_V $v8m4
; CHECK-NEXT: $v2m2 = VMV2R_V $v6m2
- ; CHECK-NEXT: $v4m2 = VMV2R_V $v8m2
- ; CHECK-NEXT: $v6m2 = VMV2R_V $v10m2
- ; CHECK-NEXT: $v14m2 = VMV2R_V $v10m2
- ; CHECK-NEXT: $v12m2 = VMV2R_V $v8m2
+ ; CHECK-NEXT: $v4m4 = VMV4R_V $v8m4
; CHECK-NEXT: $v10m2 = VMV2R_V $v6m2
+ ; CHECK-NEXT: $v8m4 = VMV4R_V $v4m4
; CHECK-NEXT: $v12m2 = VMV2R_V $v6m2
; CHECK-NEXT: $v14m2 = VMV2R_V $v8m2
- ; CHECK-NEXT: $v16m2 = VMV2R_V $v10m2
+ ; CHECK-NEXT: $v16m2 = VMV2R_V $v8m2
$v2_v3_v4 = COPY $v5_v6_v7
$v3_v4_v5 = COPY $v5_v6_v7
$v4_v5_v6 = COPY $v5_v6_v7
@@ -94,37 +82,30 @@ name: copy_zvlsseg_N4
body: |
bb.0:
; CHECK-LABEL: name: copy_zvlsseg_N4
- ; CHECK: $v6 = VMV1R_V $v10
- ; CHECK-NEXT: $v7 = VMV1R_V $v11
- ; CHECK-NEXT: $v8 = VMV1R_V $v12
- ; CHECK-NEXT: $v9 = VMV1R_V $v13
+ ; CHECK: $v6m2 = VMV2R_V $v10m2
+ ; CHECK-NEXT: $v8m2 = VMV2R_V $v12m2
; CHECK-NEXT: $v7 = VMV1R_V $v10
; CHECK-NEXT: $v8 = VMV1R_V $v11
; CHECK-NEXT: $v9 = VMV1R_V $v12
; CHECK-NEXT: $v10 = VMV1R_V $v13
- ; CHECK-NEXT: $v16 = VMV1R_V $v13
- ; CHECK-NEXT: $v15 = VMV1R_V $v12
- ; CHECK-NEXT: $v14 = VMV1R_V $v11
; CHECK-NEXT: $v13 = VMV1R_V $v10
- ; CHECK-NEXT: $v14 = VMV1R_V $v10
- ; CHECK-NEXT: $v15 = VMV1R_V $v11
- ; CHECK-NEXT: $v16 = VMV1R_V $v12
- ; CHECK-NEXT: $v17 = VMV1R_V $v13
+ ; CHECK-NEXT: $v12 = VMV1R_V $v9
+ ; CHECK-NEXT: $v11 = VMV1R_V $v8
+ ; CHECK-NEXT: $v10 = VMV1R_V $v7
+ ; CHECK-NEXT: $v14m2 = VMV2R_V $v10m2
+ ; CHECK-NEXT: $v16m2 = VMV2R_V $v12m2
; CHECK-NEXT: $v2m2 = VMV2R_V $v10m2
- ; CHECK-NEXT: $v4m2 = VMV2R_V $v12m2
- ; CHECK-NEXT: $v6m2 = VMV2R_V $v14m2
+ ; CHECK-NEXT: $v4m4 = VMV4R_V $v12m4
; CHECK-NEXT: $v8m2 = VMV2R_V $v16m2
; CHECK-NEXT: $v4m2 = VMV2R_V $v10m2
; CHECK-NEXT: $v6m2 = VMV2R_V $v12m2
; CHECK-NEXT: $v8m2 = VMV2R_V $v14m2
- ; CHECK-NEXT: $v10m2 = VMV2R_V $v16m2
- ; CHECK-NEXT: $v22m2 = VMV2R_V $v16m2
- ; CHECK-NEXT: $v20m2 = VMV2R_V $v14m2
- ; CHECK-NEXT: $v18m2 = VMV2R_V $v12m2
+ ; CHECK-NEXT: $v8m2 = VMV2R_V $v16m2
; CHECK-NEXT: $v16m2 = VMV2R_V $v10m2
+ ; CHECK-NEXT: $v12m4 = VMV4R_V $v8m4
+ ; CHECK-NEXT: $v8m2 = VMV2R_V $v4m2
; CHECK-NEXT: $v18m2 = VMV2R_V $v10m2
- ; CHECK-NEXT: $v20m2 = VMV2R_V $v12m2
- ; CHECK-NEXT: $v22m2 = VMV2R_V $v14m2
+ ; CHECK-NEXT: $v20m4 = VMV4R_V $v12m4
; CHECK-NEXT: $v24m2 = VMV2R_V $v16m2
$v6_v7_v8_v9 = COPY $v10_v11_v12_v13
$v7_v8_v9_v10 = COPY $v10_v11_v12_v13
@@ -146,16 +127,12 @@ body: |
; CHECK-NEXT: $v7 = VMV1R_V $v12
; CHECK-NEXT: $v8 = VMV1R_V $v13
; CHECK-NEXT: $v9 = VMV1R_V $v14
- ; CHECK-NEXT: $v6 = VMV1R_V $v10
- ; CHECK-NEXT: $v7 = VMV1R_V $v11
- ; CHECK-NEXT: $v8 = VMV1R_V $v12
- ; CHECK-NEXT: $v9 = VMV1R_V $v13
- ; CHECK-NEXT: $v10 = VMV1R_V $v14
- ; CHECK-NEXT: $v18 = VMV1R_V $v14
- ; CHECK-NEXT: $v17 = VMV1R_V $v13
- ; CHECK-NEXT: $v16 = VMV1R_V $v12
- ; CHECK-NEXT: $v15 = VMV1R_V $v11
- ; CHECK-NEXT: $v14 = VMV1R_V $v10
+ ; CHECK-NEXT: $v6m2 = VMV2R_V $v10m2
+ ; CHECK-NEXT: $v8m2 = VMV2R_V $v12m2
+ ; CHECK-NEXT: $v8 = VMV1R_V $v14
+ ; CHECK-NEXT: $v14m2 = VMV2R_V $v10m2
+ ; CHECK-NEXT: $v12m2 = VMV2R_V $v8m2
+ ; CHECK-NEXT: $v8 = VMV1R_V $v4
; CHECK-NEXT: $v15 = VMV1R_V $v10
; CHECK-NEXT: $v16 = VMV1R_V $v11
; CHECK-NEXT: $v17 = VMV1R_V $v12
@@ -171,30 +148,23 @@ name: copy_zvlsseg_N6
body: |
bb.0:
; CHECK-LABEL: name: copy_zvlsseg_N6
- ; CHECK: $v4 = VMV1R_V $v10
- ; CHECK-NEXT: $v5 = VMV1R_V $v11
- ; CHECK-NEXT: $v6 = VMV1R_V $v12
- ; CHECK-NEXT: $v7 = VMV1R_V $v13
- ; CHECK-NEXT: $v8 = VMV1R_V $v14
- ; CHECK-NEXT: $v9 = VMV1R_V $v15
+ ; CHECK: $v4m2 = VMV2R_V $v10m2
+ ; CHECK-NEXT: $v6m2 = VMV2R_V $v12m2
+ ; CHECK-NEXT: $v8m2 = VMV2R_V $v14m2
; CHECK-NEXT: $v5 = VMV1R_V $v10
; CHECK-NEXT: $v6 = VMV1R_V $v11
; CHECK-NEXT: $v7 = VMV1R_V $v12
; CHECK-NEXT: $v8 = VMV1R_V $v13
; CHECK-NEXT: $v9 = VMV1R_V $v14
; CHECK-NEXT: $v10 = VMV1R_V $v15
- ; CHECK-NEXT: $v20 = VMV1R_V $v15
- ; CHECK-NEXT: $v19 = VMV1R_V $v14
- ; CHECK-NEXT: $v18 = VMV1R_V $v13
- ; CHECK-NEXT: $v17 = VMV1R_V $v12
- ; CHECK-NEXT: $v16 = VMV1R_V $v11
; CHECK-NEXT: $v15 = VMV1R_V $v10
- ; CHECK-NEXT: $v16 = VMV1R_V $v10
- ; CHECK-NEXT: $v17 = VMV1R_V $v11
- ; CHECK-NEXT: $v18 = VMV1R_V $v12
- ; CHECK-NEXT: $v19 = VMV1R_V $v13
- ; CHECK-NEXT: $v20 = VMV1R_V $v14
- ; CHECK-NEXT: $v21 = VMV1R_V $v15
+ ; CHECK-NEXT: $v14 = VMV1R_V $v9
+ ; CHECK-NEXT: $v13 = VMV1R_V $v8
+ ; CHECK-NEXT: $v12 = VMV1R_V $v7
+ ; CHECK-NEXT: $v11 = VMV1R_V $v6
+ ; CHECK-NEXT: $v10 = VMV1R_V $v5
+ ; CHECK-NEXT: $v16m2 = VMV2R_V $v10m2
+ ; CHECK-NEXT: $v16m4 = VMV4R_V $v12m4
$v4_v5_v6_v7_v8_v9 = COPY $v10_v11_v12_v13_v14_v15
$v5_v6_v7_v8_v9_v10 = COPY $v10_v11_v12_v13_v14_v15
$v15_v16_v17_v18_v19_v20 = COPY $v10_v11_v12_v13_v14_v15
@@ -212,20 +182,13 @@ body: |
; CHECK-NEXT: $v7 = VMV1R_V $v14
; CHECK-NEXT: $v8 = VMV1R_V $v15
; CHECK-NEXT: $v9 = VMV1R_V $v16
- ; CHECK-NEXT: $v4 = VMV1R_V $v10
- ; CHECK-NEXT: $v5 = VMV1R_V $v11
- ; CHECK-NEXT: $v6 = VMV1R_V $v12
- ; CHECK-NEXT: $v7 = VMV1R_V $v13
- ; CHECK-NEXT: $v8 = VMV1R_V $v14
- ; CHECK-NEXT: $v9 = VMV1R_V $v15
- ; CHECK-NEXT: $v10 = VMV1R_V $v16
- ; CHECK-NEXT: $v22 = VMV1R_V $v16
- ; CHECK-NEXT: $v21 = VMV1R_V $v15
- ; CHECK-NEXT: $v20 = VMV1R_V $v14
- ; CHECK-NEXT: $v19 = VMV1R_V $v13
- ; CHECK-NEXT: $v18 = VMV1R_V $v12
- ; CHECK-NEXT: $v17 = VMV1R_V $v11
- ; CHECK-NEXT: $v16 = VMV1R_V $v10
+ ; CHECK-NEXT: $v4m2 = VMV2R_V $v10m2
+ ; CHECK-NEXT: $v6m2 = VMV2R_V $v12m2
+ ; CHECK-NEXT: $v8m2 = VMV2R_V $v14m2
+ ; CHECK-NEXT: $v8 = VMV1R_V $v16
+ ; CHECK-NEXT: $v16m2 = VMV2R_V $v10m2
+ ; CHECK-NEXT: $v12m4 = VMV4R_V $v8m4
+ ; CHECK-NEXT: $v8 = VMV1R_V $v4
; CHECK-NEXT: $v17 = VMV1R_V $v10
; CHECK-NEXT: $v18 = VMV1R_V $v11
; CHECK-NEXT: $v19 = VMV1R_V $v12
@@ -243,14 +206,9 @@ name: copy_zvlsseg_N8
body: |
bb.0:
; CHECK-LABEL: name: copy_zvlsseg_N8
- ; CHECK: $v2 = VMV1R_V $v10
- ; CHECK-NEXT: $v3 = VMV1R_V $v11
- ; CHECK-NEXT: $v4 = VMV1R_V $v12
- ; CHECK-NEXT: $v5 = VMV1R_V $v13
- ; CHECK-NEXT: $v6 = VMV1R_V $v14
- ; CHECK-NEXT: $v7 = VMV1R_V $v15
- ; CHECK-NEXT: $v8 = VMV1R_V $v16
- ; CHECK-NEXT: $v9 = VMV1R_V $v17
+ ; CHECK: $v2m2 = VMV2R_V $v10m2
+ ; CHECK-NEXT: $v4m4 = VMV4R_V $v12m4
+ ; CHECK-NEXT: $v8m2 = VMV2R_V $v16m2
; CHECK-NEXT: $v3 = VMV1R_V $v10
; CHECK-NEXT: $v4 = VMV1R_V $v11
; CHECK-NEXT: $v5 = VMV1R_V $v12
@@ -259,22 +217,17 @@ body: |
; CHECK-NEXT: $v8 = VMV1R_V $v15
; CHECK-NEXT: $v9 = VMV1R_V $v16
; CHECK-NEXT: $v10 = VMV1R_V $v17
- ; CHECK-NEXT: $v24 = VMV1R_V $v17
- ; CHECK-NEXT: $v23 = VMV1R_V $v16
- ; CHECK-NEXT: $v22 = VMV1R_V $v15
- ; CHECK-NEXT: $v21 = VMV1R_V $v14
- ; CHECK-NEXT: $v20 = VMV1R_V $v13
- ; CHECK-NEXT: $v19 = VMV1R_V $v12
- ; CHECK-NEXT: $v18 = VMV1R_V $v11
; CHECK-NEXT: $v17 = VMV1R_V $v10
- ; CHECK-NEXT: $v18 = VMV1R_V $v10
- ; CHECK-NEXT: $v19 = VMV1R_V $v11
- ; CHECK-NEXT: $v20 = VMV1R_V $v12
- ; CHECK-NEXT: $v21 = VMV1R_V $v13
- ; CHECK-NEXT: $v22 = VMV1R_V $v14
- ; CHECK-NEXT: $v23 = VMV1R_V $v15
- ; CHECK-NEXT: $v24 = VMV1R_V $v16
- ; CHECK-NEXT: $v25 = VMV1R_V $v17
+ ; CHECK-NEXT: $v16 = VMV1R_V $v9
+ ; CHECK-NEXT: $v15 = VMV1R_V $v8
+ ; CHECK-NEXT: $v14 = VMV1R_V $v7
+ ; CHECK-NEXT: $v13 = VMV1R_V $v6
+ ; CHECK-NEXT: $v12 = VMV1R_V $v5
+ ; CHECK-NEXT: $v11 = VMV1R_V $v4
+ ; CHECK-NEXT: $v10 = VMV1R_V $v3
+ ; CHECK-NEXT: $v18m2 = VMV2R_V $v10m2
+ ; CHECK-NEXT: $v20m4 = VMV4R_V $v12m4
+ ; CHECK-NEXT: $v24m2 = VMV2R_V $v16m2
$v2_v3_v4_v5_v6_v7_v8_v9 = COPY $v10_v11_v12_v13_v14_v15_v16_v17
$v3_v4_v5_v6_v7_v8_v9_v10 = COPY $v10_v11_v12_v13_v14_v15_v16_v17
$v17_v18_v19_v20_v21_v22_v23_v24 = COPY $v10_v11_v12_v13_v14_v15_v16_v17
>From de09e8b8d26c835e551879c058cda3f8130ae053 Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Thu, 14 Mar 2024 16:45:26 +0800
Subject: [PATCH 02/10] Address comments and fix wrong LMUL passed to
isConvertibleToVMV_V_V
Created using spr 1.3.4
---
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 104 +++++++++++------------
llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir | 12 ++-
2 files changed, 58 insertions(+), 58 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 9fe5666d6a81f4..3dd0d0c456dd5f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -313,73 +313,65 @@ void RISCVInstrInfo::copyPhysRegVector(MachineBasicBlock &MBB,
forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NumRegs);
unsigned I = 0;
+ auto GetCopyInfo = [&](MCRegister SrcReg, MCRegister DstReg)
+ -> std::tuple<RISCVII::VLMUL, const TargetRegisterClass &, unsigned,
+ unsigned, unsigned> {
+ unsigned SrcEncoding = TRI->getEncodingValue(SrcReg);
+ unsigned DstEncoding = TRI->getEncodingValue(DstReg);
+ if (!(SrcEncoding & 0b111) && !(DstEncoding & 0b111) && I + 8 <= NumRegs)
+ return {RISCVII::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
+ RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
+ if (!(SrcEncoding & 0b11) && !(DstEncoding & 0b11) && I + 4 <= NumRegs)
+ return {RISCVII::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
+ RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
+ if (!(SrcEncoding & 0b1) && !(DstEncoding & 0b1) && I + 2 <= NumRegs)
+ return {RISCVII::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
+ RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
+ return {RISCVII::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
+ RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
+ };
while (I != NumRegs) {
- auto GetCopyInfo =
- [&](unsigned SrcReg,
- unsigned DstReg) -> std::tuple<int, const TargetRegisterClass &,
- unsigned, unsigned, unsigned> {
- unsigned SrcEncoding = TRI->getEncodingValue(SrcReg);
- unsigned DstEncoding = TRI->getEncodingValue(DstReg);
- if (!(SrcEncoding & 0b111) && !(DstEncoding & 0b111) && I + 8 <= NumRegs)
- return {8, RISCV::VRM8RegClass, RISCV::VMV8R_V, RISCV::PseudoVMV_V_V_M8,
- RISCV::PseudoVMV_V_I_M8};
- if (!(SrcEncoding & 0b11) && !(DstEncoding & 0b11) && I + 4 <= NumRegs)
- return {4, RISCV::VRM4RegClass, RISCV::VMV4R_V, RISCV::PseudoVMV_V_V_M4,
- RISCV::PseudoVMV_V_I_M4};
- if (!(SrcEncoding & 0b1) && !(DstEncoding & 0b1) && I + 2 <= NumRegs)
- return {2, RISCV::VRM2RegClass, RISCV::VMV2R_V, RISCV::PseudoVMV_V_V_M2,
- RISCV::PseudoVMV_V_I_M2};
- return {1, RISCV::VRRegClass, RISCV::VMV1R_V, RISCV::PseudoVMV_V_V_M1,
- RISCV::PseudoVMV_V_I_M1};
- };
-
- auto [NumCopied, RegClass, Opc, VVOpc, VIOpc] = GetCopyInfo(SrcReg, DstReg);
+ auto [LMul, RegClass, Opc, VVOpc, VIOpc] = GetCopyInfo(SrcReg, DstReg);
+ unsigned NumCopied = 1 << LMul;
MachineBasicBlock::const_iterator DefMBBI;
if (isConvertibleToVMV_V_V(STI, MBB, MBBI, DefMBBI, LMul)) {
Opc = VVOpc;
-
- if (DefMBBI->getOpcode() == VIOpc) {
+ if (DefMBBI->getOpcode() == VIOpc)
Opc = VIOpc;
- }
}
- for (MCPhysReg Reg : RegClass.getRegisters()) {
- if (TRI->getEncodingValue(Reg) == TRI->getEncodingValue(SrcReg)) {
- SrcReg = Reg;
- break;
- }
- }
+ ArrayRef<MCPhysReg> Regs = RegClass.getRegisters();
+ const auto *FoundSrcReg = llvm::find_if(Regs, [&](MCPhysReg Reg) {
+ return TRI->getEncodingValue(Reg) == TRI->getEncodingValue(SrcReg);
+ });
+ assert(FoundSrcReg != Regs.end());
+ SrcReg = *FoundSrcReg;
- for (MCPhysReg Reg : RegClass.getRegisters()) {
- if (TRI->getEncodingValue(Reg) == TRI->getEncodingValue(DstReg)) {
- DstReg = Reg;
- break;
- }
+ const auto *FoundDstReg = llvm::find_if(Regs, [&](MCPhysReg Reg) {
+ return TRI->getEncodingValue(Reg) == TRI->getEncodingValue(DstReg);
+ });
+ assert(FoundDstReg != Regs.end());
+ DstReg = *FoundDstReg;
+
+ auto MIB = BuildMI(MBB, MBBI, DL, get(Opc), DstReg);
+ bool UseVMV_V_I = RISCV::getRVVMCOpcode(Opc) == RISCV::VMV_V_I;
+ bool UseVMV = UseVMV_V_I || RISCV::getRVVMCOpcode(Opc) == RISCV::VMV_V_V;
+ if (UseVMV)
+ MIB.addReg(DstReg, RegState::Undef);
+ if (UseVMV_V_I)
+ MIB = MIB.add(DefMBBI->getOperand(2));
+ else
+ MIB = MIB.addReg(SrcReg, getKillRegState(KillSrc));
+ if (UseVMV) {
+ const MCInstrDesc &Desc = DefMBBI->getDesc();
+ MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc))); // AVL
+ MIB.add(DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc))); // SEW
+ MIB.addImm(0); // tu, mu
+ MIB.addReg(RISCV::VL, RegState::Implicit);
+ MIB.addReg(RISCV::VTYPE, RegState::Implicit);
}
- auto EmitCopy = [&](MCRegister SrcReg, MCRegister DstReg, unsigned Opcode) {
- auto MIB = BuildMI(MBB, MBBI, DL, get(Opcode), DstReg);
- bool UseVMV_V_I = RISCV::getRVVMCOpcode(Opcode) == RISCV::VMV_V_I;
- bool UseVMV =
- UseVMV_V_I || RISCV::getRVVMCOpcode(Opcode) == RISCV::VMV_V_V;
- if (UseVMV)
- MIB.addReg(DstReg, RegState::Undef);
- if (UseVMV_V_I)
- MIB = MIB.add(DefMBBI->getOperand(2));
- else
- MIB = MIB.addReg(SrcReg, getKillRegState(KillSrc));
- if (UseVMV) {
- const MCInstrDesc &Desc = DefMBBI->getDesc();
- MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc))); // AVL
- MIB.add(DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc))); // SEW
- MIB.addImm(0); // tu, mu
- MIB.addReg(RISCV::VL, RegState::Implicit);
- MIB.addReg(RISCV::VTYPE, RegState::Implicit);
- }
- };
-
- EmitCopy(SrcReg, DstReg, Opc);
SrcReg = SrcReg.id() + (ReversedCopy ? -NumCopied : NumCopied);
DstReg = DstReg.id() + (ReversedCopy ? -NumCopied : NumCopied);
I += NumCopied;
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
index dd569129db4d72..6449dd0bfc67c1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
@@ -238,7 +238,8 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 80 /* e32, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: $v8_v9 = PseudoVLSEG2E32_V_M1 undef $v8_v9, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
- ; CHECK-NEXT: $v10m2 = PseudoVMV_V_V_M2 undef $v10m2, $v8m2, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: $v10 = PseudoVMV_V_V_M1 undef $v10, $v8, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: $v11 = PseudoVMV_V_V_M1 undef $v11, $v9, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
$x15 = PseudoVSETVLI $x14, 80, implicit-def $vl, implicit-def $vtype
$v8_v9 = PseudoVLSEG2E32_V_M1 undef $v8_v9, killed $x16, $noreg, 5, 0, implicit $vl, implicit $vtype
$v10_v11 = COPY $v8_v9
@@ -272,7 +273,14 @@ body: |
; CHECK-NEXT: $v8_v9_v10_v11_v12_v13_v14_v15 = PseudoVLSEG8E32_V_M1 undef $v8_v9_v10_v11_v12_v13_v14_v15, killed $x12, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: $x0 = PseudoVSETIVLI 10, 80 /* e32, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: $v15 = PseudoVLE32_V_M1 undef $v15, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype, implicit killed $v8_v9_v10_v11_v12_v13_v14_v15, implicit-def $v8_v9_v10_v11_v12_v13_v14_v15
- ; CHECK-NEXT: $v24m8 = VMV8R_V killed $v8m8
+ ; CHECK-NEXT: $v24 = VMV1R_V killed $v8
+ ; CHECK-NEXT: $v25 = VMV1R_V killed $v9
+ ; CHECK-NEXT: $v26 = VMV1R_V killed $v10
+ ; CHECK-NEXT: $v27 = VMV1R_V killed $v11
+ ; CHECK-NEXT: $v28 = VMV1R_V killed $v12
+ ; CHECK-NEXT: $v29 = VMV1R_V killed $v13
+ ; CHECK-NEXT: $v30 = VMV1R_V killed $v14
+ ; CHECK-NEXT: $v31 = VMV1R_V killed $v15
$x0 = PseudoVSETVLI $x14, 80, implicit-def $vl, implicit-def $vtype
$v8_v9_v10_v11_v12_v13_v14_v15 = PseudoVLSEG8E32_V_M1 undef $v8_v9_v10_v11_v12_v13_v14_v15, killed $x12, $noreg, 5, 0, implicit $vl, implicit $vtype
$x0 = PseudoVSETIVLI 10, 80, implicit-def $vl, implicit-def $vtype
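
The find_if refactoring above maps an encoding back to the register of the chosen class (so e.g. encoding 8 becomes $v8m2 rather than $v8 once an M2 copy has been selected). A self-contained sketch of that lookup, with a toy Reg type standing in for MCPhysReg:

#include <algorithm>
#include <cassert>
#include <vector>

struct Reg {
  const char *Name;
  unsigned Encoding;
};

// Find the register in the class whose hardware encoding matches; the patch
// asserts the search always succeeds, since every suitably aligned encoding
// has a register in the corresponding VRM2/VRM4/VRM8 class.
static Reg findRegWithEncoding(const std::vector<Reg> &Regs, unsigned Enc) {
  auto It = std::find_if(Regs.begin(), Regs.end(),
                         [&](const Reg &R) { return R.Encoding == Enc; });
  assert(It != Regs.end() && "should always find one valid register");
  return *It;
}

int main() {
  std::vector<Reg> VRM2 = {{"v0m2", 0}, {"v2m2", 2}, {"v4m2", 4},
                           {"v6m2", 6}, {"v8m2", 8}, {"v10m2", 10}};
  // Renaming $v8 (encoding 8) into the VRM2 class yields $v8m2.
  assert(findRegWithEncoding(VRM2, 8).Encoding == 8);
}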
>From d4eb4f88bf54403466f25a6f734a8a12e8f83c7d Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Thu, 14 Mar 2024 17:36:52 +0800
Subject: [PATCH 03/10] Fix wrong vmv.v.v
Created using spr 1.3.4
---
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 8 +++++---
llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir | 12 ++----------
2 files changed, 7 insertions(+), 13 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 3dd0d0c456dd5f..685d14f5887b95 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -331,11 +331,13 @@ void RISCVInstrInfo::copyPhysRegVector(MachineBasicBlock &MBB,
RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
};
while (I != NumRegs) {
- auto [LMul, RegClass, Opc, VVOpc, VIOpc] = GetCopyInfo(SrcReg, DstReg);
- unsigned NumCopied = 1 << LMul;
+ auto [LMulCopied, RegClass, Opc, VVOpc, VIOpc] =
+ GetCopyInfo(SrcReg, DstReg);
+ unsigned NumCopied = 1 << LMulCopied;
MachineBasicBlock::const_iterator DefMBBI;
- if (isConvertibleToVMV_V_V(STI, MBB, MBBI, DefMBBI, LMul)) {
+ if (LMul == LMulCopied &&
+ isConvertibleToVMV_V_V(STI, MBB, MBBI, DefMBBI, LMul)) {
Opc = VVOpc;
if (DefMBBI->getOpcode() == VIOpc)
Opc = VIOpc;
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
index 6449dd0bfc67c1..5bb6ce250e8db7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
@@ -238,8 +238,7 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 80 /* e32, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: $v8_v9 = PseudoVLSEG2E32_V_M1 undef $v8_v9, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
- ; CHECK-NEXT: $v10 = PseudoVMV_V_V_M1 undef $v10, $v8, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
- ; CHECK-NEXT: $v11 = PseudoVMV_V_V_M1 undef $v11, $v9, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: $v10m2 = VMV2R_V $v8m2
$x15 = PseudoVSETVLI $x14, 80, implicit-def $vl, implicit-def $vtype
$v8_v9 = PseudoVLSEG2E32_V_M1 undef $v8_v9, killed $x16, $noreg, 5, 0, implicit $vl, implicit $vtype
$v10_v11 = COPY $v8_v9
@@ -273,14 +272,7 @@ body: |
; CHECK-NEXT: $v8_v9_v10_v11_v12_v13_v14_v15 = PseudoVLSEG8E32_V_M1 undef $v8_v9_v10_v11_v12_v13_v14_v15, killed $x12, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: $x0 = PseudoVSETIVLI 10, 80 /* e32, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: $v15 = PseudoVLE32_V_M1 undef $v15, killed $x16, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype, implicit killed $v8_v9_v10_v11_v12_v13_v14_v15, implicit-def $v8_v9_v10_v11_v12_v13_v14_v15
- ; CHECK-NEXT: $v24 = VMV1R_V killed $v8
- ; CHECK-NEXT: $v25 = VMV1R_V killed $v9
- ; CHECK-NEXT: $v26 = VMV1R_V killed $v10
- ; CHECK-NEXT: $v27 = VMV1R_V killed $v11
- ; CHECK-NEXT: $v28 = VMV1R_V killed $v12
- ; CHECK-NEXT: $v29 = VMV1R_V killed $v13
- ; CHECK-NEXT: $v30 = VMV1R_V killed $v14
- ; CHECK-NEXT: $v31 = VMV1R_V killed $v15
+ ; CHECK-NEXT: $v24m8 = VMV8R_V killed $v8m8
$x0 = PseudoVSETVLI $x14, 80, implicit-def $vl, implicit-def $vtype
$v8_v9_v10_v11_v12_v13_v14_v15 = PseudoVLSEG8E32_V_M1 undef $v8_v9_v10_v11_v12_v13_v14_v15, killed $x12, $noreg, 5, 0, implicit $vl, implicit $vtype
$x0 = PseudoVSETIVLI 10, 80, implicit-def $vl, implicit-def $vtype
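
The new LMul == LMulCopied guard is what these test changes exercise: PseudoVLSEG2E32_V_M1 defines its tuple at LMUL=1, so converting a combined 2-register copy into PseudoVMV_V_V_M2 would run vmv.v.v under a vtype set up for M1. Falling back to the whole-register VMV2R_V is the safe choice, while the fully aligned 8-register case can still become a single VMV8R_V.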
>From 2c59650689e0e303446bf6d62e92bcfe6b77da47 Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Mon, 25 Mar 2024 16:37:39 +0800
Subject: [PATCH 04/10] Reword
Created using spr 1.3.6-beta.1
---
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index b8b2122c982b2c..93da06f3ed13e9 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -346,12 +346,12 @@ void RISCVInstrInfo::copyPhysRegVector(MachineBasicBlock &MBB,
const auto *FoundReg = llvm::find_if(Regs, [&](MCPhysReg Reg) {
return TRI->getEncodingValue(Reg) == Encoding;
});
- // We should be always able to find one valid register.
+ // We should always be able to find one valid register.
assert(FoundReg != Regs.end());
return *FoundReg;
};
while (I != NumRegs) {
- // For non-segment copying, we only do this one as the registers are always
+ // For non-segment copying, we only do this once as the registers are always
// aligned.
// For segment copying, we may do this several times. If the registers are
// aligned to larger LMUL, we can eliminate some copyings.
>From c68b537f02bb7e01480188794843a4cd8f8cb19a Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Wed, 27 Mar 2024 15:56:59 +0800
Subject: [PATCH 05/10] Address comments
Created using spr 1.3.6-beta.1
---
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 3 ++-
llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir | 8 ++++++++
2 files changed, 10 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 93da06f3ed13e9..0079339efb104a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -312,7 +312,8 @@ void RISCVInstrInfo::copyPhysRegVector(MachineBasicBlock &MBB,
bool ReversedCopy =
forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NumRegs);
if (ReversedCopy) {
- // If there exists overlapping, we should copy the registers reversely.
+ // If the src and dest overlap when copying a tuple, we need to copy the
+ // registers in reverse.
SrcEncoding += NumRegs - LMulVal;
DstEncoding += NumRegs - LMulVal;
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir
index d9a78d312eab1f..44c33227695be1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir
@@ -200,10 +200,18 @@ body: |
; CHECK-NEXT: $v21 = VMV1R_V $v14
; CHECK-NEXT: $v22 = VMV1R_V $v15
; CHECK-NEXT: $v23 = VMV1R_V $v16
+ ; CHECK-NEXT: $v22 = VMV1R_V $v21
+ ; CHECK-NEXT: $v21 = VMV1R_V $v20
+ ; CHECK-NEXT: $v20 = VMV1R_V $v19
+ ; CHECK-NEXT: $v19 = VMV1R_V $v18
+ ; CHECK-NEXT: $v18 = VMV1R_V $v17
+ ; CHECK-NEXT: $v17 = VMV1R_V $v16
+ ; CHECK-NEXT: $v16 = VMV1R_V $v15
$v3_v4_v5_v6_v7_v8_v9 = COPY $v10_v11_v12_v13_v14_v15_v16
$v4_v5_v6_v7_v8_v9_v10 = COPY $v10_v11_v12_v13_v14_v15_v16
$v16_v17_v18_v19_v20_v21_v22 = COPY $v10_v11_v12_v13_v14_v15_v16
$v17_v18_v19_v20_v21_v22_v23 = COPY $v10_v11_v12_v13_v14_v15_v16
+ $v16_v17_v18_v19_v20_v21_v22 = COPY $v15_v16_v17_v18_v19_v20_v21
...
---
name: copy_zvlsseg_N8
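
The added seventh test is the interesting one: for $v16_v17_..._v22 = COPY $v15_v16_..._v21 the encodings differ by only 1, so no wider move is ever safe and all seven VMV1R_V copies must run from the top down (v22 <- v21 first, v16 <- v15 last), ensuring no source register is overwritten before it is read.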
>From f4bfb7f879608729b8830c5ad58e547c115e7efe Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Tue, 2 Apr 2024 17:16:15 +0800
Subject: [PATCH 06/10] Fix reversed copying errors
Created using spr 1.3.6-beta.1
---
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 53 ++++++++++++++------
llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir | 32 +++++++++---
2 files changed, 64 insertions(+), 21 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 0079339efb104a..60410bae55e468 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -314,27 +314,48 @@ void RISCVInstrInfo::copyPhysRegVector(MachineBasicBlock &MBB,
if (ReversedCopy) {
// If the src and dest overlap when copying a tuple, we need to copy the
// registers in reverse.
- SrcEncoding += NumRegs - LMulVal;
- DstEncoding += NumRegs - LMulVal;
+ SrcEncoding += NumRegs - 1;
+ DstEncoding += NumRegs - 1;
}
unsigned I = 0;
auto GetCopyInfo = [&](uint16_t SrcEncoding, uint16_t DstEncoding)
-> std::tuple<RISCVII::VLMUL, const TargetRegisterClass &, unsigned,
unsigned, unsigned> {
- // If source register encoding and destination register encoding are aligned
- // to 8, we can do a LMUL8 copying.
- if (SrcEncoding % 8 == 0 && DstEncoding % 8 == 0 && I + 8 <= NumRegs)
+ if (ReversedCopy) {
+ // For reversed copying, if there are enough aligned registers (8/4/2), we
+ // can do a larger copy (LMUL8/4/2).
+ // Besides, we already know from forwardCopyWillClobberTuple that
+ // DstEncoding is larger than SrcEncoding, so the difference between
+ // DstEncoding and SrcEncoding must be at least the LMUL value we try to
+ // use, to avoid clobbering.
+ uint16_t Diff = DstEncoding - SrcEncoding;
+ if (I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
+ DstEncoding % 8 == 7)
+ return {RISCVII::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
+ RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
+ if (I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
+ DstEncoding % 4 == 3)
+ return {RISCVII::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
+ RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
+ if (I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
+ DstEncoding % 2 == 1)
+ return {RISCVII::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
+ RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
+ // Otherwise, fall back to LMUL1 copying.
+ return {RISCVII::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
+ RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
+ }
+
+ // For forward copying, if the source and destination register encodings
+ // are aligned to 8/4/2, we can do an LMUL8/4/2 copy.
+ if (I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
return {RISCVII::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
- // If source register encoding and destination register encoding are aligned
- // to 4, we can do a LMUL4 copying.
- if (SrcEncoding % 4 == 0 && DstEncoding % 4 == 0 && I + 4 <= NumRegs)
+ if (I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
return {RISCVII::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
- // If source register encoding and destination register encoding are aligned
- // to 2, we can do a LMUL2 copying.
- if (SrcEncoding % 2 == 0 && DstEncoding % 2 == 0 && I + 2 <= NumRegs)
+ if (I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
return {RISCVII::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
// Otherwise, fall back to LMUL1 copying.
@@ -369,8 +390,11 @@ void RISCVInstrInfo::copyPhysRegVector(MachineBasicBlock &MBB,
}
// Emit actual copying.
- MCRegister ActualSrcReg = FindRegWithEncoding(RegClass, SrcEncoding);
- MCRegister ActualDstReg = FindRegWithEncoding(RegClass, DstEncoding);
+ // For reversed copying, the encoding should be decreased.
+ MCRegister ActualSrcReg = FindRegWithEncoding(
+ RegClass, ReversedCopy ? (SrcEncoding - NumCopied + 1) : SrcEncoding);
+ MCRegister ActualDstReg = FindRegWithEncoding(
+ RegClass, ReversedCopy ? (DstEncoding - NumCopied + 1) : DstEncoding);
auto MIB = BuildMI(MBB, MBBI, DL, get(Opc), ActualDstReg);
bool UseVMV_V_I = RISCV::getRVVMCOpcode(Opc) == RISCV::VMV_V_I;
@@ -390,8 +414,7 @@ void RISCVInstrInfo::copyPhysRegVector(MachineBasicBlock &MBB,
MIB.addReg(RISCV::VTYPE, RegState::Implicit);
}
- // If we are copying reversely, we should decrease the register encoding
- // number.
+ // If we are copying in reverse, we should decrease the encodings.
SrcEncoding += (ReversedCopy ? -NumCopied : NumCopied);
DstEncoding += (ReversedCopy ? -NumCopied : NumCopied);
I += NumCopied;
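
With this change, SrcEncoding and DstEncoding track the highest remaining register during a reversed copy (hence the start adjustment by NumRegs - 1), a group of width NumCopied occupies encodings [Encoding - NumCopied + 1, Encoding], and the mod-8/4/2 tests check alignment of the group's top register (remainder 7/3/1). The Diff >= NumCopied requirement guarantees the destination group lies entirely above the source group, so one wide move cannot clobber unread source registers.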
diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir
index 44c33227695be1..a44a93449332fe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir
@@ -53,8 +53,8 @@ body: |
; CHECK-NEXT: $v4 = VMV1R_V $v5
; CHECK-NEXT: $v5 = VMV1R_V $v6
; CHECK-NEXT: $v6 = VMV1R_V $v7
- ; CHECK-NEXT: $v9 = VMV1R_V $v7
; CHECK-NEXT: $v8m2 = VMV2R_V $v6m2
+ ; CHECK-NEXT: $v7 = VMV1R_V $v5
; CHECK-NEXT: $v9 = VMV1R_V $v5
; CHECK-NEXT: $v10m2 = VMV2R_V $v6m2
; CHECK-NEXT: $v0m2 = VMV2R_V $v6m2
@@ -62,8 +62,8 @@ body: |
; CHECK-NEXT: $v4m2 = VMV2R_V $v10m2
; CHECK-NEXT: $v2m2 = VMV2R_V $v6m2
; CHECK-NEXT: $v4m4 = VMV4R_V $v8m4
- ; CHECK-NEXT: $v14m2 = VMV2R_V $v10m2
; CHECK-NEXT: $v12m4 = VMV4R_V $v8m4
+ ; CHECK-NEXT: $v10m2 = VMV2R_V $v6m2
; CHECK-NEXT: $v12m2 = VMV2R_V $v6m2
; CHECK-NEXT: $v14m2 = VMV2R_V $v8m2
; CHECK-NEXT: $v16m2 = VMV2R_V $v10m2
@@ -132,18 +132,24 @@ body: |
; CHECK-NEXT: $v6m2 = VMV2R_V $v10m2
; CHECK-NEXT: $v8m2 = VMV2R_V $v12m2
; CHECK-NEXT: $v10 = VMV1R_V $v14
- ; CHECK-NEXT: $v18m2 = VMV2R_V $v14m2
+ ; CHECK-NEXT: $v18 = VMV1R_V $v14
; CHECK-NEXT: $v16m2 = VMV2R_V $v12m2
- ; CHECK-NEXT: $v14 = VMV1R_V $v10
+ ; CHECK-NEXT: $v14m2 = VMV2R_V $v10m2
; CHECK-NEXT: $v15 = VMV1R_V $v10
; CHECK-NEXT: $v16 = VMV1R_V $v11
; CHECK-NEXT: $v17 = VMV1R_V $v12
; CHECK-NEXT: $v18 = VMV1R_V $v13
; CHECK-NEXT: $v19 = VMV1R_V $v14
+ ; CHECK-NEXT: $v7 = VMV1R_V $v11
+ ; CHECK-NEXT: $v8m4 = VMV4R_V $v12m4
+ ; CHECK-NEXT: $v16m4 = VMV4R_V $v12m4
+ ; CHECK-NEXT: $v15 = VMV1R_V $v11
$v5_v6_v7_v8_v9 = COPY $v10_v11_v12_v13_v14
$v6_v7_v8_v9_v10 = COPY $v10_v11_v12_v13_v14
$v14_v15_v16_v17_v18 = COPY $v10_v11_v12_v13_v14
$v15_v16_v17_v18_v19 = COPY $v10_v11_v12_v13_v14
+ $v7_v8_v9_v10_v11 = COPY $v11_v12_v13_v14_v15
+ $v15_v16_v17_v18_v19 = COPY $v11_v12_v13_v14_v15
...
---
name: copy_zvlsseg_N6
@@ -159,6 +165,10 @@ body: |
; CHECK-NEXT: $v8 = VMV1R_V $v13
; CHECK-NEXT: $v9 = VMV1R_V $v14
; CHECK-NEXT: $v10 = VMV1R_V $v15
+ ; CHECK-NEXT: $v6m2 = VMV2R_V $v10m2
+ ; CHECK-NEXT: $v8m4 = VMV4R_V $v12m4
+ ; CHECK-NEXT: $v16m4 = VMV4R_V $v12m4
+ ; CHECK-NEXT: $v14m2 = VMV2R_V $v10m2
; CHECK-NEXT: $v20 = VMV1R_V $v15
; CHECK-NEXT: $v19 = VMV1R_V $v14
; CHECK-NEXT: $v18 = VMV1R_V $v13
@@ -170,6 +180,8 @@ body: |
; CHECK-NEXT: $v20m2 = VMV2R_V $v14m2
$v4_v5_v6_v7_v8_v9 = COPY $v10_v11_v12_v13_v14_v15
$v5_v6_v7_v8_v9_v10 = COPY $v10_v11_v12_v13_v14_v15
+ $v6_v7_v8_v9_v10_v11 = COPY $v10_v11_v12_v13_v14_v15
+ $v14_v15_v16_v17_v18_v19 = COPY $v10_v11_v12_v13_v14_v15
$v15_v16_v17_v18_v19_v20 = COPY $v10_v11_v12_v13_v14_v15
$v16_v17_v18_v19_v20_v21 = COPY $v10_v11_v12_v13_v14_v15
...
@@ -189,10 +201,13 @@ body: |
; CHECK-NEXT: $v6m2 = VMV2R_V $v12m2
; CHECK-NEXT: $v8m2 = VMV2R_V $v14m2
; CHECK-NEXT: $v10 = VMV1R_V $v16
- ; CHECK-NEXT: $v22m2 = VMV2R_V $v16m2
+ ; CHECK-NEXT: $v20 = VMV1R_V $v16
+ ; CHECK-NEXT: $v16m4 = VMV4R_V $v12m4
+ ; CHECK-NEXT: $v14m2 = VMV2R_V $v10m2
+ ; CHECK-NEXT: $v22 = VMV1R_V $v16
; CHECK-NEXT: $v20m2 = VMV2R_V $v14m2
; CHECK-NEXT: $v18m2 = VMV2R_V $v12m2
- ; CHECK-NEXT: $v16 = VMV1R_V $v10
+ ; CHECK-NEXT: $v16m2 = VMV2R_V $v10m2
; CHECK-NEXT: $v17 = VMV1R_V $v10
; CHECK-NEXT: $v18 = VMV1R_V $v11
; CHECK-NEXT: $v19 = VMV1R_V $v12
@@ -209,6 +224,7 @@ body: |
; CHECK-NEXT: $v16 = VMV1R_V $v15
$v3_v4_v5_v6_v7_v8_v9 = COPY $v10_v11_v12_v13_v14_v15_v16
$v4_v5_v6_v7_v8_v9_v10 = COPY $v10_v11_v12_v13_v14_v15_v16
+ $v14_v15_v16_v17_v18_v19_v20 = COPY $v10_v11_v12_v13_v14_v15_v16
$v16_v17_v18_v19_v20_v21_v22 = COPY $v10_v11_v12_v13_v14_v15_v16
$v17_v18_v19_v20_v21_v22_v23 = COPY $v10_v11_v12_v13_v14_v15_v16
$v16_v17_v18_v19_v20_v21_v22 = COPY $v15_v16_v17_v18_v19_v20_v21
@@ -240,8 +256,12 @@ body: |
; CHECK-NEXT: $v18m2 = VMV2R_V $v10m2
; CHECK-NEXT: $v20m4 = VMV4R_V $v12m4
; CHECK-NEXT: $v24m2 = VMV2R_V $v16m2
+ ; CHECK-NEXT: $v8m8 = VMV8R_V $v0m8
+ ; CHECK-NEXT: $v0m8 = VMV8R_V $v8m8
$v2_v3_v4_v5_v6_v7_v8_v9 = COPY $v10_v11_v12_v13_v14_v15_v16_v17
$v3_v4_v5_v6_v7_v8_v9_v10 = COPY $v10_v11_v12_v13_v14_v15_v16_v17
$v17_v18_v19_v20_v21_v22_v23_v24 = COPY $v10_v11_v12_v13_v14_v15_v16_v17
$v18_v19_v20_v21_v22_v23_v24_v25 = COPY $v10_v11_v12_v13_v14_v15_v16_v17
+ $v8_v9_v10_v11_v12_v13_v14_v15 = COPY $v0_v1_v2_v3_v4_v5_v6_v7
+ $v0_v1_v2_v3_v4_v5_v6_v7 = COPY $v8_v9_v10_v11_v12_v13_v14_v15
...
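
Note that the two whole-tuple copies added at the end are forward copies despite the adjacent ranges: for $v8_..._v15 = COPY $v0_..._v7, DstEncoding 8 is not strictly inside [0, 8), so forwardCopyWillClobberTuple is false and both directions collapse into a single VMV8R_V, as checked above.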
>From 4fa547bed2fef01283e3a66889dbd3eba0653077 Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Wed, 3 Apr 2024 10:52:26 +0800
Subject: [PATCH 07/10] Use RISCVVType::decodeVLMUL
Created using spr 1.3.6-beta.1
---
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 60410bae55e468..550c56d133ca5c 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -379,7 +379,7 @@ void RISCVInstrInfo::copyPhysRegVector(MachineBasicBlock &MBB,
// aligned to larger LMUL, we can eliminate some copyings.
auto [LMulCopied, RegClass, Opc, VVOpc, VIOpc] =
GetCopyInfo(SrcEncoding, DstEncoding);
- unsigned NumCopied = 1 << LMulCopied;
+ auto [NumCopied, _] = RISCVVType::decodeVLMUL(LMul);
MachineBasicBlock::const_iterator DefMBBI;
if (LMul == LMulCopied &&
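
RISCVVType::decodeVLMUL returns a (multiplier, fractional) pair, so decoding the VLMUL actually chosen for this step gives the number of registers covered; note the argument here is LMul rather than LMulCopied, which the "Fix wrong LMUL" patch below corrects. A hypothetical mirror of the non-fractional cases, for illustration only:

#include <cassert>
#include <utility>

enum VLMUL { LMUL_1 = 0, LMUL_2 = 1, LMUL_4 = 2, LMUL_8 = 3 };

// Mirror of RISCVVType::decodeVLMUL restricted to non-fractional LMULs (an
// assumption for illustration): returns {multiplier, isFractional}.
static std::pair<unsigned, bool> decodeVLMUL(VLMUL L) {
  return {1u << L, false};
}

int main() {
  // Equivalent to the "NumCopied = 1 << LMulCopied" this patch replaces.
  assert(decodeVLMUL(LMUL_4).first == 4 && !decodeVLMUL(LMUL_4).second);
}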
>From e7884f26efeac2f0c0d6f3882e5a3cac4d6cad57 Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Thu, 4 Apr 2024 21:08:36 +0800
Subject: [PATCH 08/10] Use uint16_t
Created using spr 1.3.6-beta.1
---
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 550c56d133ca5c..60b8468dc2b570 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -33,6 +33,7 @@
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"
+#include <cstdint>
using namespace llvm;
@@ -302,11 +303,9 @@ void RISCVInstrInfo::copyPhysRegVector(MachineBasicBlock &MBB,
RISCVII::VLMUL LMul, unsigned NF) const {
const TargetRegisterInfo *TRI = STI.getRegisterInfo();
- unsigned SrcEncoding = TRI->getEncodingValue(SrcReg);
- unsigned DstEncoding = TRI->getEncodingValue(DstReg);
- unsigned LMulVal;
- bool Fractional;
- std::tie(LMulVal, Fractional) = RISCVVType::decodeVLMUL(LMul);
+ uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
+ uint16_t DstEncoding = TRI->getEncodingValue(DstReg);
+ auto [LMulVal, Fractional] = RISCVVType::decodeVLMUL(LMul);
assert(!Fractional && "It is impossible be fractional lmul here.");
unsigned NumRegs = NF * LMulVal;
bool ReversedCopy =
>From c8e3cf7aaf5ed4a76d27a63ba6cedabad94927ad Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Fri, 5 Apr 2024 00:45:28 +0800
Subject: [PATCH 09/10] Remove include
Created using spr 1.3.6-beta.1
---
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 1 -
1 file changed, 1 deletion(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 60b8468dc2b570..129155c2495871 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -33,7 +33,6 @@
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"
-#include <cstdint>
using namespace llvm;
>From da67b915119baaebeda103c15a03c4ee301b018f Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Mon, 8 Apr 2024 13:23:14 +0800
Subject: [PATCH 10/10] Fix wrong LMUL
Created using spr 1.3.6-beta.1
---
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index e8a58ea415114f..a1befaf40d09f7 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -377,7 +377,7 @@ void RISCVInstrInfo::copyPhysRegVector(MachineBasicBlock &MBB,
// aligned to larger LMUL, we can eliminate some copyings.
auto [LMulCopied, RegClass, Opc, VVOpc, VIOpc] =
GetCopyInfo(SrcEncoding, DstEncoding);
- auto [NumCopied, _] = RISCVVType::decodeVLMUL(LMul);
+ auto [NumCopied, _] = RISCVVType::decodeVLMUL(LMulCopied);
MachineBasicBlock::const_iterator DefMBBI;
if (LMul == LMulCopied &&
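
Putting the series together, here is a self-contained sketch of the final copy-planning loop (integer encodings only, opcode choice reduced to a width; all names are illustrative). Running it on the reversed 5-register case from zvlsseg-copy.mir prints the plan the tests check: v18 <- v14, then v16m2 <- v12m2, then v14m2 <- v10m2.

#include <cstdio>

static unsigned pickWidth(int Src, int Dst, unsigned I, unsigned N,
                          bool Reversed) {
  if (Reversed) {
    // Encodings point at the top of each remaining range; a width-W move is
    // safe when both tops are W-aligned (remainder W-1) and Dst - Src >= W.
    int Diff = Dst - Src;
    if (I + 8 <= N && Diff >= 8 && Src % 8 == 7 && Dst % 8 == 7) return 8;
    if (I + 4 <= N && Diff >= 4 && Src % 4 == 3 && Dst % 4 == 3) return 4;
    if (I + 2 <= N && Diff >= 2 && Src % 2 == 1 && Dst % 2 == 1) return 2;
    return 1;
  }
  if (I + 8 <= N && Src % 8 == 0 && Dst % 8 == 0) return 8;
  if (I + 4 <= N && Src % 4 == 0 && Dst % 4 == 0) return 4;
  if (I + 2 <= N && Src % 2 == 0 && Dst % 2 == 0) return 2;
  return 1;
}

int main() {
  // $v14_v15_v16_v17_v18 = COPY $v10_v11_v12_v13_v14 (overlap, reversed).
  int Src = 10, Dst = 14;
  unsigned N = 5;
  bool Reversed = Dst > Src && Dst < Src + (int)N;
  if (Reversed) { Src += N - 1; Dst += N - 1; } // start at the top registers
  for (unsigned I = 0; I != N;) {
    unsigned W = pickWidth(Src, Dst, I, N, Reversed);
    int SrcBase = Reversed ? Src - (int)W + 1 : Src;
    int DstBase = Reversed ? Dst - (int)W + 1 : Dst;
    std::printf("v%d..v%d <- v%d..v%d\n", DstBase, DstBase + (int)W - 1,
                SrcBase, SrcBase + (int)W - 1);
    Src += Reversed ? -(int)W : (int)W;
    Dst += Reversed ? -(int)W : (int)W;
    I += W;
  }
}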
More information about the llvm-branch-commits mailing list