[llvm] [RISCV] Fold vfmv.f.s of f16 into load from stack (PR #110214)

Luke Lau via llvm-commits llvm-commits at lists.llvm.org
Sun Sep 29 20:54:41 PDT 2024


https://github.com/lukel97 updated https://github.com/llvm/llvm-project/pull/110214

From 4cb69a113be206fd1ec8597cde3d4c236a17a7d0 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Fri, 27 Sep 2024 15:16:28 +0800
Subject: [PATCH 1/3] Precommit tests
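
These follow the existing pattern in this file: an inline asm clobbers all 32
vector registers to force %v to be spilled, and one branch then extracts
element 0 from the reloaded vector, e.g. for f16 (abridged from the test
below):

  tail call void asm sideeffect "", "~{v0},~{v1},...,~{v31}"()
  br i1 %c, label %truebb, label %falsebb
truebb:
  %x = extractelement <vscale x 1 x half> %v, i32 0
  ret half %x

The reload thus feeds directly into the extract, which is the shape that
foldMemoryOperandImpl can turn into a scalar load from the spill slot.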

---
 llvm/test/CodeGen/RISCV/rvv/stack-folding.ll | 78 +++++++++++++++++++-
 1 file changed, 76 insertions(+), 2 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/rvv/stack-folding.ll b/llvm/test/CodeGen/RISCV/rvv/stack-folding.ll
index f966835622a9f3..5ad4bc401a8b2c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/stack-folding.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/stack-folding.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=riscv32 -mattr=+v -verify-machineinstrs | FileCheck --check-prefixes=CHECK,RV32 %s
-; RUN: llc < %s -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck --check-prefixes=CHECK,RV64 %s
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zfhmin,+zfbfmin,+zvfh,+zvfbfmin -verify-machineinstrs | FileCheck --check-prefixes=CHECK,RV32 %s
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zfhmin,+zfbfmin,+zvfh,+zvfbfmin -verify-machineinstrs | FileCheck --check-prefixes=CHECK,RV64 %s
 
 define i64 @i64(<vscale x 1 x i64> %v, i1 %c) {
 ; RV32-LABEL: i64:
@@ -257,3 +257,77 @@ falsebb:
   ret float 0.0
 }
 
+define half @f16(<vscale x 1 x half> %v, i1 %c) {
+; CHECK-LABEL: f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 1
+; CHECK-NEXT:    sub sp, sp, a1
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
+; CHECK-NEXT:    addi a1, sp, 16
+; CHECK-NEXT:    vs1r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    beqz a0, .LBB6_2
+; CHECK-NEXT:  # %bb.1: # %truebb
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT:    vfmv.f.s fa0, v8
+; CHECK-NEXT:    j .LBB6_3
+; CHECK-NEXT:  .LBB6_2: # %falsebb
+; CHECK-NEXT:    fmv.h.x fa0, zero
+; CHECK-NEXT:  .LBB6_3: # %falsebb
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 1
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
+  br i1 %c, label %truebb, label %falsebb
+truebb:
+  %x = extractelement <vscale x 1 x half> %v, i32 0
+  ret half %x
+falsebb:
+  ret half 0.0
+}
+
+define bfloat @bf16(<vscale x 2 x bfloat> %v, i1 %c) {
+; CHECK-LABEL: bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 1
+; CHECK-NEXT:    sub sp, sp, a1
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
+; CHECK-NEXT:    addi a1, sp, 16
+; CHECK-NEXT:    vs1r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    beqz a0, .LBB7_2
+; CHECK-NEXT:  # %bb.1: # %truebb
+; CHECK-NEXT:    lh a0, 16(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    fmv.h.x fa0, a0
+; CHECK-NEXT:    j .LBB7_3
+; CHECK-NEXT:  .LBB7_2: # %falsebb
+; CHECK-NEXT:    fmv.h.x fa0, zero
+; CHECK-NEXT:  .LBB7_3: # %falsebb
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 1
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
+  br i1 %c, label %truebb, label %falsebb
+truebb:
+  %x = extractelement <vscale x 2 x bfloat> %v, i32 0
+  ret bfloat %x
+falsebb:
+  ret bfloat 0.0
+}
+

From cdf9cb4e9d28f98c4f5a2e7a53708ff5c4d11e79 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Fri, 27 Sep 2024 15:20:56 +0800
Subject: [PATCH 2/3] [RISCV] Fold vfmv.f.s of f16 into load from stack

After #110144, we can finish off #110129 by also folding an f16 vfmv.f.s into
a flh. vfmv.f.s for f16 is only available with zvfh, which in turn requires
zfhmin, so flh is guaranteed to be available whenever this fold applies.

bf16 has no vfmv.f.s, so since #110144 its extract_vector_elt is lowered via
the integer domain and already picks up the existing integer vmv.x.s fold.
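
For example, the f16 reload in the test below goes from

  addi a0, sp, 16
  vl1r.v v8, (a0)
  vsetivli zero, 1, e16, m1, ta, ma
  vfmv.f.s fa0, v8

to a single scalar load from the spill slot:

  flh fa0, 16(sp)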
---
 llvm/lib/Target/RISCV/RISCVInstrInfo.cpp     | 4 ++--
 llvm/test/CodeGen/RISCV/rvv/stack-folding.ll | 5 +----
 2 files changed, 3 insertions(+), 6 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index f0295d289ed86a..0a5b55846ed51c 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -804,8 +804,8 @@ MachineInstr *RISCVInstrInfo::foldMemoryOperandImpl(
           MI.getOperand(RISCVII::getSEWOpNum(MI.getDesc())).getImm();
       switch (Log2SEW) {
       case 4:
-        // TODO: Support f16/bf16
-        return nullptr;
+        LoadOpc = RISCV::FLH;
+        break;
       case 5:
         LoadOpc = RISCV::FLW;
         break;
diff --git a/llvm/test/CodeGen/RISCV/rvv/stack-folding.ll b/llvm/test/CodeGen/RISCV/rvv/stack-folding.ll
index 5ad4bc401a8b2c..119d357a386195 100644
--- a/llvm/test/CodeGen/RISCV/rvv/stack-folding.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/stack-folding.ll
@@ -273,10 +273,7 @@ define half @f16(<vscale x 1 x half> %v, i1 %c) {
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    beqz a0, .LBB6_2
 ; CHECK-NEXT:  # %bb.1: # %truebb
-; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; CHECK-NEXT:    vfmv.f.s fa0, v8
+; CHECK-NEXT:    flh fa0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    j .LBB6_3
 ; CHECK-NEXT:  .LBB6_2: # %falsebb
 ; CHECK-NEXT:    fmv.h.x fa0, zero

From 73ec586ed143efa067320e0ebe63c63f900b26a4 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Mon, 30 Sep 2024 11:54:04 +0800
Subject: [PATCH 3/3] Add RUN lines for illegal scalar types
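
Without zfhmin/zfbfmin there is no flh or fmv.h.x, so on the NOZFMIN
configurations the f16/bf16 result is instead materialized as a NaN-boxed
value in a 32-bit FP register: the 16-bit element is reloaded with an integer
lh, the upper bits are set to all ones with lui (1048560 is 0xffff0, i.e.
0xffff0000 once shifted), and the result is moved over with fmv.w.x, as in
the checks below:

  lh a0, 16(sp)
  lui a1, 1048560
  or a0, a0, a1
  fmv.w.x fa0, a0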

---
 llvm/test/CodeGen/RISCV/rvv/stack-folding.ll | 166 +++++++++++++------
 1 file changed, 113 insertions(+), 53 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/rvv/stack-folding.ll b/llvm/test/CodeGen/RISCV/rvv/stack-folding.ll
index 119d357a386195..7bac7f04837735 100644
--- a/llvm/test/CodeGen/RISCV/rvv/stack-folding.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/stack-folding.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zfhmin,+zfbfmin,+zvfh,+zvfbfmin -verify-machineinstrs | FileCheck --check-prefixes=CHECK,RV32 %s
-; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zfhmin,+zfbfmin,+zvfh,+zvfbfmin -verify-machineinstrs | FileCheck --check-prefixes=CHECK,RV64 %s
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zfbfmin,+zvfh,+zvfbfmin -verify-machineinstrs | FileCheck --check-prefixes=CHECK,RV32,ZFMIN %s
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zfbfmin,+zvfh,+zvfbfmin -verify-machineinstrs | FileCheck --check-prefixes=CHECK,RV64,ZFMIN %s
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin -verify-machineinstrs | FileCheck --check-prefixes=CHECK,RV32,NOZFMIN %s
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin -verify-machineinstrs | FileCheck --check-prefixes=CHECK,RV64,NOZFMIN %s
 
 define i64 @i64(<vscale x 1 x i64> %v, i1 %c) {
 ; RV32-LABEL: i64:
@@ -258,31 +260,60 @@ falsebb:
 }
 
 define half @f16(<vscale x 1 x half> %v, i1 %c) {
-; CHECK-LABEL: f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 1
-; CHECK-NEXT:    sub sp, sp, a1
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
-; CHECK-NEXT:    addi a1, sp, 16
-; CHECK-NEXT:    vs1r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT:    andi a0, a0, 1
-; CHECK-NEXT:    #APP
-; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    beqz a0, .LBB6_2
-; CHECK-NEXT:  # %bb.1: # %truebb
-; CHECK-NEXT:    flh fa0, 16(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    j .LBB6_3
-; CHECK-NEXT:  .LBB6_2: # %falsebb
-; CHECK-NEXT:    fmv.h.x fa0, zero
-; CHECK-NEXT:  .LBB6_3: # %falsebb
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    add sp, sp, a0
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+; ZFMIN-LABEL: f16:
+; ZFMIN:       # %bb.0:
+; ZFMIN-NEXT:    addi sp, sp, -16
+; ZFMIN-NEXT:    .cfi_def_cfa_offset 16
+; ZFMIN-NEXT:    csrr a1, vlenb
+; ZFMIN-NEXT:    slli a1, a1, 1
+; ZFMIN-NEXT:    sub sp, sp, a1
+; ZFMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
+; ZFMIN-NEXT:    addi a1, sp, 16
+; ZFMIN-NEXT:    vs1r.v v8, (a1) # Unknown-size Folded Spill
+; ZFMIN-NEXT:    andi a0, a0, 1
+; ZFMIN-NEXT:    #APP
+; ZFMIN-NEXT:    #NO_APP
+; ZFMIN-NEXT:    beqz a0, .LBB6_2
+; ZFMIN-NEXT:  # %bb.1: # %truebb
+; ZFMIN-NEXT:    flh fa0, 16(sp) # 8-byte Folded Reload
+; ZFMIN-NEXT:    j .LBB6_3
+; ZFMIN-NEXT:  .LBB6_2: # %falsebb
+; ZFMIN-NEXT:    fmv.h.x fa0, zero
+; ZFMIN-NEXT:  .LBB6_3: # %falsebb
+; ZFMIN-NEXT:    csrr a0, vlenb
+; ZFMIN-NEXT:    slli a0, a0, 1
+; ZFMIN-NEXT:    add sp, sp, a0
+; ZFMIN-NEXT:    addi sp, sp, 16
+; ZFMIN-NEXT:    ret
+;
+; NOZFMIN-LABEL: f16:
+; NOZFMIN:       # %bb.0:
+; NOZFMIN-NEXT:    addi sp, sp, -16
+; NOZFMIN-NEXT:    .cfi_def_cfa_offset 16
+; NOZFMIN-NEXT:    csrr a1, vlenb
+; NOZFMIN-NEXT:    slli a1, a1, 1
+; NOZFMIN-NEXT:    sub sp, sp, a1
+; NOZFMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
+; NOZFMIN-NEXT:    addi a1, sp, 16
+; NOZFMIN-NEXT:    vs1r.v v8, (a1) # Unknown-size Folded Spill
+; NOZFMIN-NEXT:    andi a0, a0, 1
+; NOZFMIN-NEXT:    #APP
+; NOZFMIN-NEXT:    #NO_APP
+; NOZFMIN-NEXT:    beqz a0, .LBB6_2
+; NOZFMIN-NEXT:  # %bb.1: # %truebb
+; NOZFMIN-NEXT:    lh a0, 16(sp) # 8-byte Folded Reload
+; NOZFMIN-NEXT:    lui a1, 1048560
+; NOZFMIN-NEXT:    or a0, a0, a1
+; NOZFMIN-NEXT:    j .LBB6_3
+; NOZFMIN-NEXT:  .LBB6_2: # %falsebb
+; NOZFMIN-NEXT:    lui a0, 1048560
+; NOZFMIN-NEXT:  .LBB6_3: # %falsebb
+; NOZFMIN-NEXT:    fmv.w.x fa0, a0
+; NOZFMIN-NEXT:    csrr a0, vlenb
+; NOZFMIN-NEXT:    slli a0, a0, 1
+; NOZFMIN-NEXT:    add sp, sp, a0
+; NOZFMIN-NEXT:    addi sp, sp, 16
+; NOZFMIN-NEXT:    ret
   tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
   br i1 %c, label %truebb, label %falsebb
 truebb:
@@ -293,32 +324,61 @@ falsebb:
 }
 
 define bfloat @bf16(<vscale x 2 x bfloat> %v, i1 %c) {
-; CHECK-LABEL: bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 1
-; CHECK-NEXT:    sub sp, sp, a1
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
-; CHECK-NEXT:    addi a1, sp, 16
-; CHECK-NEXT:    vs1r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT:    andi a0, a0, 1
-; CHECK-NEXT:    #APP
-; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    beqz a0, .LBB7_2
-; CHECK-NEXT:  # %bb.1: # %truebb
-; CHECK-NEXT:    lh a0, 16(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    fmv.h.x fa0, a0
-; CHECK-NEXT:    j .LBB7_3
-; CHECK-NEXT:  .LBB7_2: # %falsebb
-; CHECK-NEXT:    fmv.h.x fa0, zero
-; CHECK-NEXT:  .LBB7_3: # %falsebb
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    add sp, sp, a0
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+; ZFMIN-LABEL: bf16:
+; ZFMIN:       # %bb.0:
+; ZFMIN-NEXT:    addi sp, sp, -16
+; ZFMIN-NEXT:    .cfi_def_cfa_offset 16
+; ZFMIN-NEXT:    csrr a1, vlenb
+; ZFMIN-NEXT:    slli a1, a1, 1
+; ZFMIN-NEXT:    sub sp, sp, a1
+; ZFMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
+; ZFMIN-NEXT:    addi a1, sp, 16
+; ZFMIN-NEXT:    vs1r.v v8, (a1) # Unknown-size Folded Spill
+; ZFMIN-NEXT:    andi a0, a0, 1
+; ZFMIN-NEXT:    #APP
+; ZFMIN-NEXT:    #NO_APP
+; ZFMIN-NEXT:    beqz a0, .LBB7_2
+; ZFMIN-NEXT:  # %bb.1: # %truebb
+; ZFMIN-NEXT:    lh a0, 16(sp) # 8-byte Folded Reload
+; ZFMIN-NEXT:    fmv.h.x fa0, a0
+; ZFMIN-NEXT:    j .LBB7_3
+; ZFMIN-NEXT:  .LBB7_2: # %falsebb
+; ZFMIN-NEXT:    fmv.h.x fa0, zero
+; ZFMIN-NEXT:  .LBB7_3: # %falsebb
+; ZFMIN-NEXT:    csrr a0, vlenb
+; ZFMIN-NEXT:    slli a0, a0, 1
+; ZFMIN-NEXT:    add sp, sp, a0
+; ZFMIN-NEXT:    addi sp, sp, 16
+; ZFMIN-NEXT:    ret
+;
+; NOZFMIN-LABEL: bf16:
+; NOZFMIN:       # %bb.0:
+; NOZFMIN-NEXT:    addi sp, sp, -16
+; NOZFMIN-NEXT:    .cfi_def_cfa_offset 16
+; NOZFMIN-NEXT:    csrr a1, vlenb
+; NOZFMIN-NEXT:    slli a1, a1, 1
+; NOZFMIN-NEXT:    sub sp, sp, a1
+; NOZFMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
+; NOZFMIN-NEXT:    addi a1, sp, 16
+; NOZFMIN-NEXT:    vs1r.v v8, (a1) # Unknown-size Folded Spill
+; NOZFMIN-NEXT:    andi a0, a0, 1
+; NOZFMIN-NEXT:    #APP
+; NOZFMIN-NEXT:    #NO_APP
+; NOZFMIN-NEXT:    beqz a0, .LBB7_2
+; NOZFMIN-NEXT:  # %bb.1: # %truebb
+; NOZFMIN-NEXT:    lh a0, 16(sp) # 8-byte Folded Reload
+; NOZFMIN-NEXT:    lui a1, 1048560
+; NOZFMIN-NEXT:    or a0, a0, a1
+; NOZFMIN-NEXT:    j .LBB7_3
+; NOZFMIN-NEXT:  .LBB7_2: # %falsebb
+; NOZFMIN-NEXT:    lui a0, 1048560
+; NOZFMIN-NEXT:  .LBB7_3: # %falsebb
+; NOZFMIN-NEXT:    fmv.w.x fa0, a0
+; NOZFMIN-NEXT:    csrr a0, vlenb
+; NOZFMIN-NEXT:    slli a0, a0, 1
+; NOZFMIN-NEXT:    add sp, sp, a0
+; NOZFMIN-NEXT:    addi sp, sp, 16
+; NOZFMIN-NEXT:    ret
   tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
   br i1 %c, label %truebb, label %falsebb
 truebb:


