[llvm] ad06e65 - [RISCV] Fix the bug in the register allocator caused by reserved BP.

Hsiangkai Wang via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 20 17:24:11 PST 2022


Author: Hsiangkai Wang
Date: 2022-01-21T01:23:01Z
New Revision: ad06e65dc4da45be2cda3e07664502651679622a

URL: https://github.com/llvm/llvm-project/commit/ad06e65dc4da45be2cda3e07664502651679622a
DIFF: https://github.com/llvm/llvm-project/commit/ad06e65dc4da45be2cda3e07664502651679622a.diff

LOG: [RISCV] Fix the bug in the register allocator caused by reserved BP.

Originally, hasRVVFrameObject() would scan all the stack objects to check
whether there is any scalable vector object on the stack or not.
However, it causes errors in the register allocator. In issue 53016, it
returns false before RA because there are no RVV stack objects. After RA,
it returns true because there are spilling slots for RVV values during RA.
Due to this inconsistent behavior, the compiler does not reserve BP during
register allocation but then generates BP accesses in the PEI pass.

The function is changed to use hasVInstructions() as the return value. It is
not precise, but it can make the register allocation correct.

Refer to https://github.com/llvm/llvm-project/issues/53016.

Differential Revision: https://reviews.llvm.org/D117663

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
    llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll
    llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index f5d491938050..ad003404d793 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -242,7 +242,8 @@ bool RISCVFrameLowering::hasBP(const MachineFunction &MF) const {
   // adjustment, we can not use SP to access the stack objects for the
   // arguments. Instead, use BP to access these stack objects.
   return (MFI.hasVarSizedObjects() ||
-          (!hasReservedCallFrame(MF) && MFI.getMaxCallFrameSize() != 0)) &&
+          (!hasReservedCallFrame(MF) && (!MFI.isMaxCallFrameSizeComputed() ||
+                                         MFI.getMaxCallFrameSize() != 0))) &&
          TRI->hasStackRealignment(MF);
 }
 
@@ -940,11 +941,22 @@ void RISCVFrameLowering::processFunctionBeforeFrameFinalized(
 }
 
 static bool hasRVVFrameObject(const MachineFunction &MF) {
-  const MachineFrameInfo &MFI = MF.getFrameInfo();
-  for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I)
-    if (MFI.getStackID(I) == TargetStackID::ScalableVector)
-      return true;
-  return false;
+  // Originally, the function will scan all the stack objects to check whether
+  // if there is any scalable vector object on the stack or not. However, it
+  // causes errors in the register allocator. In issue 53016, it returns false
+  // before RA because there is no RVV stack objects. After RA, it returns true
+  // because there are spilling slots for RVV values during RA. It will not
+  // reserve BP during register allocation and generate BP access in the PEI
+  // pass due to the inconsistent behavior of the function.
+  //
+  // The function is changed to use hasVInstructions() as the return value. It
+  // is not precise, but it can make the register allocation correct.
+  //
+  // FIXME: Find a better way to make the decision or revisit the solution in
+  // D103622.
+  //
+  // Refer to https://github.com/llvm/llvm-project/issues/53016.
+  return MF.getSubtarget<RISCVSubtarget>().hasVInstructions();
 }
 
 // Not preserve stack space within prologue for outgoing variables when the

diff  --git a/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir b/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir
index 4b62b8ead3b5..8b33e981854d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir
@@ -51,36 +51,34 @@ body:             |
   ; CHECK-LABEL: name: spillslot
   ; CHECK: bb.0:
   ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
-  ; CHECK-NEXT:   liveins: $x12, $x1, $x9, $x18, $x19, $x20, $x21, $x22, $x23, $x24, $x25, $x26, $x27
+  ; CHECK-NEXT:   liveins: $x12, $x1, $x18, $x19, $x20, $x21, $x22, $x23, $x24, $x25, $x26, $x27
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $x2 = frame-setup ADDI $x2, -2032
   ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 2032
   ; CHECK-NEXT:   SD killed $x1, $x2, 2024 :: (store (s64) into %stack.3)
   ; CHECK-NEXT:   SD killed $x8, $x2, 2016 :: (store (s64) into %stack.4)
-  ; CHECK-NEXT:   SD killed $x9, $x2, 2008 :: (store (s64) into %stack.5)
-  ; CHECK-NEXT:   SD killed $x18, $x2, 2000 :: (store (s64) into %stack.6)
-  ; CHECK-NEXT:   SD killed $x19, $x2, 1992 :: (store (s64) into %stack.7)
-  ; CHECK-NEXT:   SD killed $x20, $x2, 1984 :: (store (s64) into %stack.8)
-  ; CHECK-NEXT:   SD killed $x21, $x2, 1976 :: (store (s64) into %stack.9)
-  ; CHECK-NEXT:   SD killed $x22, $x2, 1968 :: (store (s64) into %stack.10)
-  ; CHECK-NEXT:   SD killed $x23, $x2, 1960 :: (store (s64) into %stack.11)
-  ; CHECK-NEXT:   SD killed $x24, $x2, 1952 :: (store (s64) into %stack.12)
-  ; CHECK-NEXT:   SD killed $x25, $x2, 1944 :: (store (s64) into %stack.13)
-  ; CHECK-NEXT:   SD killed $x26, $x2, 1936 :: (store (s64) into %stack.14)
-  ; CHECK-NEXT:   SD killed $x27, $x2, 1928 :: (store (s64) into %stack.15)
+  ; CHECK-NEXT:   SD killed $x18, $x2, 2008 :: (store (s64) into %stack.5)
+  ; CHECK-NEXT:   SD killed $x19, $x2, 2000 :: (store (s64) into %stack.6)
+  ; CHECK-NEXT:   SD killed $x20, $x2, 1992 :: (store (s64) into %stack.7)
+  ; CHECK-NEXT:   SD killed $x21, $x2, 1984 :: (store (s64) into %stack.8)
+  ; CHECK-NEXT:   SD killed $x22, $x2, 1976 :: (store (s64) into %stack.9)
+  ; CHECK-NEXT:   SD killed $x23, $x2, 1968 :: (store (s64) into %stack.10)
+  ; CHECK-NEXT:   SD killed $x24, $x2, 1960 :: (store (s64) into %stack.11)
+  ; CHECK-NEXT:   SD killed $x25, $x2, 1952 :: (store (s64) into %stack.12)
+  ; CHECK-NEXT:   SD killed $x26, $x2, 1944 :: (store (s64) into %stack.13)
+  ; CHECK-NEXT:   SD killed $x27, $x2, 1936 :: (store (s64) into %stack.14)
   ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x1, -8
   ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x8, -16
-  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x9, -24
-  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x18, -32
-  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x19, -40
-  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x20, -48
-  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x21, -56
-  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x22, -64
-  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x23, -72
-  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x24, -80
-  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x25, -88
-  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x26, -96
-  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x27, -104
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x18, -24
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x19, -32
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x20, -40
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x21, -48
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x22, -56
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x23, -64
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x24, -72
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x25, -80
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x26, -88
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $x27, -96
   ; CHECK-NEXT:   $x8 = frame-setup ADDI $x2, 2032
   ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION def_cfa $x8, 0
   ; CHECK-NEXT:   $x2 = frame-setup ADDI $x2, -272
@@ -95,7 +93,7 @@ body:             |
   ; CHECK-NEXT:   $x10 = ADDI $x0, 50
   ; CHECK-NEXT:   $x11 = MUL killed $x11, killed $x10
   ; CHECK-NEXT:   $x10 = LUI 1
-  ; CHECK-NEXT:   $x10 = ADDIW killed $x10, -1896
+  ; CHECK-NEXT:   $x10 = ADDIW killed $x10, -1888
   ; CHECK-NEXT:   $x10 = ADD $x2, killed $x10
   ; CHECK-NEXT:   $x10 = ADD killed $x10, killed $x11
   ; CHECK-NEXT:   PseudoVSPILL_M1 killed renamable $v25, killed $x10 :: (store unknown-size into %stack.1, align 8)
@@ -113,8 +111,8 @@ body:             |
   ; CHECK-NEXT:   renamable $x21 = ADDI $x2, 1664
   ; CHECK-NEXT:   renamable $x22 = ADDI $x2, 1792
   ; CHECK-NEXT:   renamable $x23 = ADDI $x2, 1920
-  ; CHECK-NEXT:   SD killed $x1, $x2, 8 :: (store (s64) into %stack.16)
-  ; CHECK-NEXT:   SD killed $x5, $x2, 0 :: (store (s64) into %stack.17)
+  ; CHECK-NEXT:   SD killed $x1, $x2, 8 :: (store (s64) into %stack.15)
+  ; CHECK-NEXT:   SD killed $x5, $x2, 0 :: (store (s64) into %stack.16)
   ; CHECK-NEXT:   $x11 = LUI 1
   ; CHECK-NEXT:   $x11 = ADDIW killed $x11, -2048
   ; CHECK-NEXT:   $x24 = ADD $x2, killed $x11
@@ -130,23 +128,19 @@ body:             |
   ; CHECK-NEXT:   renamable $x13 = SLLI renamable $x11, 3
   ; CHECK-NEXT:   renamable $x13 = ADD renamable $x26, killed renamable $x13
   ; CHECK-NEXT:   renamable $x13 = LD killed renamable $x13, 0 :: (load (s64))
-  ; CHECK-NEXT:   renamable $x9 = SRAI renamable $x13, 63
-  ; CHECK-NEXT:   renamable $x9 = SRLI killed renamable $x9, 62
-  ; CHECK-NEXT:   renamable $x9 = ADD renamable $x13, killed renamable $x9
-  ; CHECK-NEXT:   renamable $x9 = ANDI killed renamable $x9, -4
-  ; CHECK-NEXT:   renamable $x16 = SUB killed renamable $x13, renamable $x9
+  ; CHECK-NEXT:   renamable $x16 = SUB killed renamable $x13, renamable $x13
   ; CHECK-NEXT:   dead renamable $x13 = PseudoVSETIVLI 1, 64, implicit-def $vl, implicit-def $vtype
   ; CHECK-NEXT:   renamable $x13 = nsw ADDI renamable $x16, -2
   ; CHECK-NEXT:   $x5 = PseudoReadVLENB
   ; CHECK-NEXT:   $x1 = ADDI $x0, 50
   ; CHECK-NEXT:   $x5 = MUL killed $x5, killed $x1
   ; CHECK-NEXT:   $x1 = LUI 1
-  ; CHECK-NEXT:   $x1 = ADDIW killed $x1, -1896
+  ; CHECK-NEXT:   $x1 = ADDIW killed $x1, -1888
   ; CHECK-NEXT:   $x1 = ADD $x2, killed $x1
   ; CHECK-NEXT:   $x1 = ADD killed $x1, killed $x5
-  ; CHECK-NEXT:   $x5 = LD $x2, 0 :: (load (s64) from %stack.17)
+  ; CHECK-NEXT:   $x5 = LD $x2, 0 :: (load (s64) from %stack.16)
   ; CHECK-NEXT:   renamable $v0 = PseudoVRELOAD_M1 killed $x1 :: (load unknown-size from %stack.1, align 8)
-  ; CHECK-NEXT:   $x1 = LD $x2, 8 :: (load (s64) from %stack.16)
+  ; CHECK-NEXT:   $x1 = LD $x2, 8 :: (load (s64) from %stack.15)
   ; CHECK-NEXT:   renamable $v0 = PseudoVSLIDEDOWN_VX_M1 undef renamable $v0, killed renamable $v0, killed renamable $x13, $noreg, 8, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   renamable $x13 = PseudoVMV_X_S_M1 killed renamable $v0, 8, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   BLT killed renamable $x16, renamable $x27, %bb.2
@@ -155,7 +149,7 @@ body:             |
   ; CHECK-NEXT:   successors: %bb.2(0x80000000)
   ; CHECK-NEXT:   liveins: $x1, $x5, $x6, $x7, $x9, $x10, $x11, $x12, $x13, $x14, $x15, $x17, $x18, $x19, $x20, $x21, $x22, $x23, $x24, $x25, $x26, $x27, $x28, $x29, $x30, $x31
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   renamable $x9 = COPY killed renamable $x13
+  ; CHECK-NEXT:   renamable $x10 = COPY killed renamable $x13
   ; CHECK-NEXT:   PseudoBR %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2:
@@ -165,17 +159,16 @@ body:             |
   ; CHECK-NEXT:   $x2 = frame-destroy ADDI $x2, 272
   ; CHECK-NEXT:   $x1 = LD $x2, 2024 :: (load (s64) from %stack.3)
   ; CHECK-NEXT:   $x8 = LD $x2, 2016 :: (load (s64) from %stack.4)
-  ; CHECK-NEXT:   $x9 = LD $x2, 2008 :: (load (s64) from %stack.5)
-  ; CHECK-NEXT:   $x18 = LD $x2, 2000 :: (load (s64) from %stack.6)
-  ; CHECK-NEXT:   $x19 = LD $x2, 1992 :: (load (s64) from %stack.7)
-  ; CHECK-NEXT:   $x20 = LD $x2, 1984 :: (load (s64) from %stack.8)
-  ; CHECK-NEXT:   $x21 = LD $x2, 1976 :: (load (s64) from %stack.9)
-  ; CHECK-NEXT:   $x22 = LD $x2, 1968 :: (load (s64) from %stack.10)
-  ; CHECK-NEXT:   $x23 = LD $x2, 1960 :: (load (s64) from %stack.11)
-  ; CHECK-NEXT:   $x24 = LD $x2, 1952 :: (load (s64) from %stack.12)
-  ; CHECK-NEXT:   $x25 = LD $x2, 1944 :: (load (s64) from %stack.13)
-  ; CHECK-NEXT:   $x26 = LD $x2, 1936 :: (load (s64) from %stack.14)
-  ; CHECK-NEXT:   $x27 = LD $x2, 1928 :: (load (s64) from %stack.15)
+  ; CHECK-NEXT:   $x18 = LD $x2, 2008 :: (load (s64) from %stack.5)
+  ; CHECK-NEXT:   $x19 = LD $x2, 2000 :: (load (s64) from %stack.6)
+  ; CHECK-NEXT:   $x20 = LD $x2, 1992 :: (load (s64) from %stack.7)
+  ; CHECK-NEXT:   $x21 = LD $x2, 1984 :: (load (s64) from %stack.8)
+  ; CHECK-NEXT:   $x22 = LD $x2, 1976 :: (load (s64) from %stack.9)
+  ; CHECK-NEXT:   $x23 = LD $x2, 1968 :: (load (s64) from %stack.10)
+  ; CHECK-NEXT:   $x24 = LD $x2, 1960 :: (load (s64) from %stack.11)
+  ; CHECK-NEXT:   $x25 = LD $x2, 1952 :: (load (s64) from %stack.12)
+  ; CHECK-NEXT:   $x26 = LD $x2, 1944 :: (load (s64) from %stack.13)
+  ; CHECK-NEXT:   $x27 = LD $x2, 1936 :: (load (s64) from %stack.14)
   ; CHECK-NEXT:   $x2 = frame-destroy ADDI $x2, 2032
   ; CHECK-NEXT:   PseudoRET
   bb.0:
@@ -212,11 +205,7 @@ body:             |
     renamable $x13 = SLLI renamable $x11, 3
     renamable $x13 = ADD renamable $x26, killed renamable $x13
     renamable $x13 = LD killed renamable $x13, 0 :: (load (s64))
-    renamable $x9 = SRAI renamable $x13, 63
-    renamable $x9 = SRLI killed renamable $x9, 62
-    renamable $x9 = ADD renamable $x13, killed renamable $x9
-    renamable $x9 = ANDI killed renamable $x9, -4
-    renamable $x16 = SUB killed renamable $x13, renamable $x9
+    renamable $x16 = SUB killed renamable $x13, renamable $x13
     dead renamable $x13 = PseudoVSETIVLI 1, 64, implicit-def $vl, implicit-def $vtype
     renamable $x13 = nsw ADDI renamable $x16, -2
     renamable $v0 = PseudoVRELOAD_M1 %stack.1 :: (load unknown-size from %stack.1, align 8)
@@ -228,7 +217,7 @@ body:             |
     successors: %bb.2
     liveins: $x1, $x5, $x6, $x7, $x9, $x10, $x11, $x12, $x13, $x14, $x15, $x17, $x18, $x19, $x20, $x21, $x22, $x23, $x24, $x25, $x26, $x27, $x28, $x29, $x30, $x31
 
-    renamable $x9 = COPY killed renamable $x13
+    renamable $x10 = COPY killed renamable $x13
     PseudoBR %bb.2
 
   bb.2:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
index 23e41d67e0a5..13b856e00a7c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
@@ -882,11 +882,14 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x
 ; LMULMAX1-NEXT:    .cfi_def_cfa_offset 384
 ; LMULMAX1-NEXT:    sd ra, 376(sp) # 8-byte Folded Spill
 ; LMULMAX1-NEXT:    sd s0, 368(sp) # 8-byte Folded Spill
+; LMULMAX1-NEXT:    sd s1, 360(sp) # 8-byte Folded Spill
 ; LMULMAX1-NEXT:    .cfi_offset ra, -8
 ; LMULMAX1-NEXT:    .cfi_offset s0, -16
+; LMULMAX1-NEXT:    .cfi_offset s1, -24
 ; LMULMAX1-NEXT:    addi s0, sp, 384
 ; LMULMAX1-NEXT:    .cfi_def_cfa s0, 0
 ; LMULMAX1-NEXT:    andi sp, sp, -128
+; LMULMAX1-NEXT:    mv s1, sp
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-NEXT:    vle32.v v24, (a0)
 ; LMULMAX1-NEXT:    addi a1, a0, 16
@@ -904,25 +907,26 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x
 ; LMULMAX1-NEXT:    addi a0, a0, 112
 ; LMULMAX1-NEXT:    vle32.v v31, (a0)
 ; LMULMAX1-NEXT:    ld a0, 0(s0)
-; LMULMAX1-NEXT:    addi a1, sp, 240
+; LMULMAX1-NEXT:    addi sp, sp, -16
+; LMULMAX1-NEXT:    addi a1, s1, 240
 ; LMULMAX1-NEXT:    vse32.v v15, (a1)
-; LMULMAX1-NEXT:    addi a1, sp, 224
+; LMULMAX1-NEXT:    addi a1, s1, 224
 ; LMULMAX1-NEXT:    vse32.v v14, (a1)
-; LMULMAX1-NEXT:    addi a1, sp, 208
+; LMULMAX1-NEXT:    addi a1, s1, 208
 ; LMULMAX1-NEXT:    vse32.v v13, (a1)
-; LMULMAX1-NEXT:    addi a1, sp, 192
+; LMULMAX1-NEXT:    addi a1, s1, 192
 ; LMULMAX1-NEXT:    vse32.v v12, (a1)
-; LMULMAX1-NEXT:    addi a1, sp, 176
+; LMULMAX1-NEXT:    addi a1, s1, 176
 ; LMULMAX1-NEXT:    vse32.v v11, (a1)
-; LMULMAX1-NEXT:    addi a1, sp, 160
+; LMULMAX1-NEXT:    addi a1, s1, 160
 ; LMULMAX1-NEXT:    vse32.v v10, (a1)
-; LMULMAX1-NEXT:    addi a1, sp, 144
+; LMULMAX1-NEXT:    addi a1, s1, 144
 ; LMULMAX1-NEXT:    vse32.v v9, (a1)
 ; LMULMAX1-NEXT:    li a1, 42
 ; LMULMAX1-NEXT:    sd a1, 8(sp)
 ; LMULMAX1-NEXT:    sd a0, 0(sp)
-; LMULMAX1-NEXT:    addi a0, sp, 128
-; LMULMAX1-NEXT:    addi a1, sp, 128
+; LMULMAX1-NEXT:    addi a0, s1, 128
+; LMULMAX1-NEXT:    addi a1, s1, 128
 ; LMULMAX1-NEXT:    vse32.v v8, (a1)
 ; LMULMAX1-NEXT:    vmv.v.v v8, v24
 ; LMULMAX1-NEXT:    vmv.v.v v9, v25
@@ -933,9 +937,11 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x
 ; LMULMAX1-NEXT:    vmv.v.v v14, v30
 ; LMULMAX1-NEXT:    vmv.v.v v15, v31
 ; LMULMAX1-NEXT:    call ext3@plt
+; LMULMAX1-NEXT:    addi sp, sp, 16
 ; LMULMAX1-NEXT:    addi sp, s0, -384
 ; LMULMAX1-NEXT:    ld ra, 376(sp) # 8-byte Folded Reload
 ; LMULMAX1-NEXT:    ld s0, 368(sp) # 8-byte Folded Reload
+; LMULMAX1-NEXT:    ld s1, 360(sp) # 8-byte Folded Reload
 ; LMULMAX1-NEXT:    addi sp, sp, 384
 ; LMULMAX1-NEXT:    ret
   %t = call <32 x i32> @ext3(<32 x i32> %z, <32 x i32> %y, <32 x i32> %x, i32 %w, i32 42)

diff  --git a/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll b/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll
index 3eccd6c84d9c..5f926efd9012 100644
--- a/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll
@@ -5,25 +5,27 @@
 define void @foo(i32* nocapture noundef %p1) {
 ; CHECK-LABEL: foo:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -128
-; CHECK-NEXT:    .cfi_def_cfa_offset 128
-; CHECK-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    sd s1, 104(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    addi sp, sp, -192
+; CHECK-NEXT:    .cfi_def_cfa_offset 192
+; CHECK-NEXT:    sd ra, 184(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s0, 176(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s1, 168(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s2, 160(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_offset ra, -8
 ; CHECK-NEXT:    .cfi_offset s0, -16
 ; CHECK-NEXT:    .cfi_offset s1, -24
-; CHECK-NEXT:    addi s0, sp, 128
+; CHECK-NEXT:    .cfi_offset s2, -32
+; CHECK-NEXT:    addi s0, sp, 192
 ; CHECK-NEXT:    .cfi_def_cfa s0, 0
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    slli a1, a1, 1
 ; CHECK-NEXT:    sub sp, sp, a1
 ; CHECK-NEXT:    andi sp, sp, -64
 ; CHECK-NEXT:    mv s1, sp
-; CHECK-NEXT:    mv s1, a0
+; CHECK-NEXT:    mv s2, a0
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    addi a0, s1, 104
+; CHECK-NEXT:    addi a0, s1, 160
 ; CHECK-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    addi t0, s1, 64
@@ -39,16 +41,17 @@ define void @foo(i32* nocapture noundef %p1) {
 ; CHECK-NEXT:    call bar@plt
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
-; CHECK-NEXT:    vle32.v v8, (s1)
-; CHECK-NEXT:    addi a0, s1, 104
+; CHECK-NEXT:    vle32.v v8, (s2)
+; CHECK-NEXT:    addi a0, s1, 160
 ; CHECK-NEXT:    vl2re8.v v10, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT:    vfadd.vv v8, v10, v8
-; CHECK-NEXT:    vse32.v v8, (s1)
-; CHECK-NEXT:    addi sp, s0, -128
-; CHECK-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    ld s1, 104(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    addi sp, sp, 128
+; CHECK-NEXT:    vse32.v v8, (s2)
+; CHECK-NEXT:    addi sp, s0, -192
+; CHECK-NEXT:    ld ra, 184(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s0, 176(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s1, 168(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s2, 160(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 192
 ; CHECK-NEXT:    ret
 entry:
   %vla = alloca [10 x i32], align 64

diff  --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
index 5f52fdba85c3..6bae64140888 100644
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -600,30 +600,30 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV32MV-NEXT:    addi sp, sp, -96
 ; RV32MV-NEXT:    sw ra, 92(sp) # 4-byte Folded Spill
 ; RV32MV-NEXT:    sw s0, 88(sp) # 4-byte Folded Spill
-; RV32MV-NEXT:    sw s1, 84(sp) # 4-byte Folded Spill
-; RV32MV-NEXT:    sw s2, 80(sp) # 4-byte Folded Spill
-; RV32MV-NEXT:    sw s3, 76(sp) # 4-byte Folded Spill
-; RV32MV-NEXT:    sw s4, 72(sp) # 4-byte Folded Spill
-; RV32MV-NEXT:    sw s5, 68(sp) # 4-byte Folded Spill
+; RV32MV-NEXT:    sw s2, 84(sp) # 4-byte Folded Spill
+; RV32MV-NEXT:    sw s3, 80(sp) # 4-byte Folded Spill
+; RV32MV-NEXT:    sw s4, 76(sp) # 4-byte Folded Spill
+; RV32MV-NEXT:    sw s5, 72(sp) # 4-byte Folded Spill
+; RV32MV-NEXT:    sw s6, 68(sp) # 4-byte Folded Spill
 ; RV32MV-NEXT:    addi s0, sp, 96
 ; RV32MV-NEXT:    andi sp, sp, -32
-; RV32MV-NEXT:    mv s1, a0
+; RV32MV-NEXT:    mv s2, a0
 ; RV32MV-NEXT:    lw a0, 8(a0)
-; RV32MV-NEXT:    lw a1, 4(s1)
+; RV32MV-NEXT:    lw a1, 4(s2)
 ; RV32MV-NEXT:    slli a2, a0, 31
 ; RV32MV-NEXT:    srli a3, a1, 1
-; RV32MV-NEXT:    or s2, a3, a2
-; RV32MV-NEXT:    lbu a2, 12(s1)
+; RV32MV-NEXT:    or s3, a3, a2
+; RV32MV-NEXT:    lbu a2, 12(s2)
 ; RV32MV-NEXT:    srli a3, a0, 1
 ; RV32MV-NEXT:    andi a3, a3, 1
-; RV32MV-NEXT:    neg s3, a3
+; RV32MV-NEXT:    neg s4, a3
 ; RV32MV-NEXT:    slli a3, a2, 30
 ; RV32MV-NEXT:    srli a0, a0, 2
-; RV32MV-NEXT:    or s4, a0, a3
+; RV32MV-NEXT:    or s5, a0, a3
 ; RV32MV-NEXT:    srli a0, a2, 2
 ; RV32MV-NEXT:    andi a2, a0, 1
-; RV32MV-NEXT:    lw a0, 0(s1)
-; RV32MV-NEXT:    neg s5, a2
+; RV32MV-NEXT:    lw a0, 0(s2)
+; RV32MV-NEXT:    neg s6, a2
 ; RV32MV-NEXT:    andi a1, a1, 1
 ; RV32MV-NEXT:    neg a1, a1
 ; RV32MV-NEXT:    li a2, 6
@@ -633,14 +633,14 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV32MV-NEXT:    sw a0, 32(sp)
 ; RV32MV-NEXT:    li a2, -5
 ; RV32MV-NEXT:    li a3, -1
-; RV32MV-NEXT:    mv a0, s4
-; RV32MV-NEXT:    mv a1, s5
+; RV32MV-NEXT:    mv a0, s5
+; RV32MV-NEXT:    mv a1, s6
 ; RV32MV-NEXT:    call __moddi3@plt
 ; RV32MV-NEXT:    sw a1, 52(sp)
 ; RV32MV-NEXT:    sw a0, 48(sp)
 ; RV32MV-NEXT:    li a2, 7
-; RV32MV-NEXT:    mv a0, s2
-; RV32MV-NEXT:    mv a1, s3
+; RV32MV-NEXT:    mv a0, s3
+; RV32MV-NEXT:    mv a1, s4
 ; RV32MV-NEXT:    li a3, 0
 ; RV32MV-NEXT:    call __moddi3@plt
 ; RV32MV-NEXT:    sw a1, 44(sp)
@@ -662,14 +662,14 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV32MV-NEXT:    vmv.v.i v8, 0
 ; RV32MV-NEXT:    vmerge.vim v8, v8, -1, v0
 ; RV32MV-NEXT:    vsetivli zero, 1, e32, m2, ta, mu
-; RV32MV-NEXT:    vse32.v v8, (s1)
+; RV32MV-NEXT:    vse32.v v8, (s2)
 ; RV32MV-NEXT:    vslidedown.vi v10, v8, 1
 ; RV32MV-NEXT:    vmv.x.s a0, v10
 ; RV32MV-NEXT:    vslidedown.vi v10, v8, 2
 ; RV32MV-NEXT:    vmv.x.s a1, v10
 ; RV32MV-NEXT:    slli a2, a1, 1
 ; RV32MV-NEXT:    sub a0, a2, a0
-; RV32MV-NEXT:    sw a0, 4(s1)
+; RV32MV-NEXT:    sw a0, 4(s2)
 ; RV32MV-NEXT:    vslidedown.vi v10, v8, 4
 ; RV32MV-NEXT:    vmv.x.s a0, v10
 ; RV32MV-NEXT:    srli a2, a0, 30
@@ -678,7 +678,7 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV32MV-NEXT:    slli a3, a3, 2
 ; RV32MV-NEXT:    or a2, a3, a2
 ; RV32MV-NEXT:    andi a2, a2, 7
-; RV32MV-NEXT:    sb a2, 12(s1)
+; RV32MV-NEXT:    sb a2, 12(s2)
 ; RV32MV-NEXT:    srli a1, a1, 31
 ; RV32MV-NEXT:    vslidedown.vi v8, v8, 3
 ; RV32MV-NEXT:    vmv.x.s a2, v8
@@ -687,15 +687,15 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV32MV-NEXT:    or a1, a1, a2
 ; RV32MV-NEXT:    slli a0, a0, 2
 ; RV32MV-NEXT:    or a0, a1, a0
-; RV32MV-NEXT:    sw a0, 8(s1)
+; RV32MV-NEXT:    sw a0, 8(s2)
 ; RV32MV-NEXT:    addi sp, s0, -96
 ; RV32MV-NEXT:    lw ra, 92(sp) # 4-byte Folded Reload
 ; RV32MV-NEXT:    lw s0, 88(sp) # 4-byte Folded Reload
-; RV32MV-NEXT:    lw s1, 84(sp) # 4-byte Folded Reload
-; RV32MV-NEXT:    lw s2, 80(sp) # 4-byte Folded Reload
-; RV32MV-NEXT:    lw s3, 76(sp) # 4-byte Folded Reload
-; RV32MV-NEXT:    lw s4, 72(sp) # 4-byte Folded Reload
-; RV32MV-NEXT:    lw s5, 68(sp) # 4-byte Folded Reload
+; RV32MV-NEXT:    lw s2, 84(sp) # 4-byte Folded Reload
+; RV32MV-NEXT:    lw s3, 80(sp) # 4-byte Folded Reload
+; RV32MV-NEXT:    lw s4, 76(sp) # 4-byte Folded Reload
+; RV32MV-NEXT:    lw s5, 72(sp) # 4-byte Folded Reload
+; RV32MV-NEXT:    lw s6, 68(sp) # 4-byte Folded Reload
 ; RV32MV-NEXT:    addi sp, sp, 96
 ; RV32MV-NEXT:    ret
 ;


        


More information about the llvm-commits mailing list