[llvm] 55ae180 - [VP] Teach isVPBinaryOp to recognize vp.smin/smax/umin/umax/minnum/maxnum.

Yeting Kuo via llvm-commits llvm-commits at lists.llvm.org
Wed Oct 19 16:21:20 PDT 2022


Author: Yeting Kuo
Date: 2022-10-20T07:21:13+08:00
New Revision: 55ae180a4cb7fc68b3ac153f07752c8c6a2d92f0

URL: https://github.com/llvm/llvm-project/commit/55ae180a4cb7fc68b3ac153f07752c8c6a2d92f0
DIFF: https://github.com/llvm/llvm-project/commit/55ae180a4cb7fc68b3ac153f07752c8c6a2d92f0.diff

LOG: [VP] Teach isVPBinaryOp to recognize vp.smin/smax/umin/umax/minnum/maxnum.

These VP intrinsics are binary operations, so mark them with VP_PROPERTY_BINARYOP and let isVPBinaryOp recognize them as such.
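
For context, the isVPBinaryOp in the title is the generated ISD::isVPBinaryOp predicate: it is produced by expanding VPIntrinsics.def, so adding VP_PROPERTY_BINARYOP to an entry is what makes the predicate return true for the corresponding VP opcode. Roughly, the generated switch looks like this (a sketch of the expansion in SelectionDAG, not part of this patch):

bool ISD::isVPBinaryOp(unsigned Opcode) {
  switch (Opcode) {
  default:
    break;
// Every VP SDNode becomes a case label; entries tagged with
// VP_PROPERTY_BINARYOP return true, all others fall through to break.
#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) case ISD::VPSD:
#define VP_PROPERTY_BINARYOP return true;
#define END_REGISTER_VP_SDNODE(VPSD) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return false;
}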

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D135753

Added: 
    

Modified: 
    llvm/include/llvm/IR/VPIntrinsics.def
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/VPIntrinsics.def b/llvm/include/llvm/IR/VPIntrinsics.def
index 2769f4e5b87d1..2d10feb5e1616 100644
--- a/llvm/include/llvm/IR/VPIntrinsics.def
+++ b/llvm/include/llvm/IR/VPIntrinsics.def
@@ -198,18 +198,22 @@ HELPER_REGISTER_BINARY_INT_VP(vp_xor, VP_XOR, Xor)
 
 // llvm.vp.smin(x,y,mask,vlen)
 BEGIN_REGISTER_VP(vp_smin, 2, 3, VP_SMIN, -1)
+VP_PROPERTY_BINARYOP
 END_REGISTER_VP(vp_smin, VP_SMIN)
 
 // llvm.vp.smax(x,y,mask,vlen)
 BEGIN_REGISTER_VP(vp_smax, 2, 3, VP_SMAX, -1)
+VP_PROPERTY_BINARYOP
 END_REGISTER_VP(vp_smax, VP_SMAX)
 
 // llvm.vp.umin(x,y,mask,vlen)
 BEGIN_REGISTER_VP(vp_umin, 2, 3, VP_UMIN, -1)
+VP_PROPERTY_BINARYOP
 END_REGISTER_VP(vp_umin, VP_UMIN)
 
 // llvm.vp.umax(x,y,mask,vlen)
 BEGIN_REGISTER_VP(vp_umax, 2, 3, VP_UMAX, -1)
+VP_PROPERTY_BINARYOP
 END_REGISTER_VP(vp_umax, VP_UMAX)
 ///// } Integer Arithmetic
 
@@ -274,10 +278,12 @@ END_REGISTER_VP(vp_copysign, VP_FCOPYSIGN)
 
 // llvm.vp.minnum(x, y, mask,vlen)
 BEGIN_REGISTER_VP(vp_minnum, 2, 3, VP_FMINNUM, -1)
+VP_PROPERTY_BINARYOP
 END_REGISTER_VP(vp_minnum, VP_FMINNUM)
 
 // llvm.vp.maxnum(x, y, mask,vlen)
 BEGIN_REGISTER_VP(vp_maxnum, 2, 3, VP_FMAXNUM, -1)
+VP_PROPERTY_BINARYOP
 END_REGISTER_VP(vp_maxnum, VP_FMAXNUM)
 
 // llvm.vp.ceil(x,mask,vlen)
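
With the properties above in place, the VP min/max and minnum/maxnum opcodes report as binary operations. A standalone sanity check against the generated predicate (hypothetical test code, not part of this patch; checkVPMinMaxAreBinaryOps is an illustrative name):

#include "llvm/CodeGen/ISDOpcodes.h"
#include <cassert>

// After this change, each of these VP opcodes is recognized as a
// binary operation by the generated predicate.
void checkVPMinMaxAreBinaryOps() {
  assert(llvm::ISD::isVPBinaryOp(llvm::ISD::VP_SMIN));
  assert(llvm::ISD::isVPBinaryOp(llvm::ISD::VP_UMAX));
  assert(llvm::ISD::isVPBinaryOp(llvm::ISD::VP_FMINNUM));
  assert(llvm::ISD::isVPBinaryOp(llvm::ISD::VP_FMAXNUM));
}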

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
index d174254e4c307..474125b11699a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
@@ -357,18 +357,14 @@ define <256 x i8> @vmax_vx_v258i8_evl129(<256 x i8> %va, i8 %b, <256 x i1> %m) {
   ret <256 x i8> %v
 }
 
-; FIXME: The upper half is doing nothing.
+; The upper half is doing nothing.
 
 define <256 x i8> @vmax_vx_v258i8_evl128(<256 x i8> %va, i8 %b, <256 x i1> %m) {
 ; CHECK-LABEL: vmax_vx_v258i8_evl128:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 128
-; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
-; CHECK-NEXT:    vlm.v v24, (a1)
+; CHECK-NEXT:    li a1, 128
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    vmax.vx v8, v8, a0, v0.t
-; CHECK-NEXT:    vsetivli zero, 0, e8, m8, ta, ma
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    vmax.vx v16, v16, a0, v0.t
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
index 263c907b17496..0bf408cc292c8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
@@ -356,18 +356,14 @@ define <256 x i8> @vmaxu_vx_v258i8_evl129(<256 x i8> %va, i8 %b, <256 x i1> %m)
   ret <256 x i8> %v
 }
 
-; FIXME: The upper half is doing nothing.
+; The upper half is doing nothing.
 
 define <256 x i8> @vmaxu_vx_v258i8_evl128(<256 x i8> %va, i8 %b, <256 x i1> %m) {
 ; CHECK-LABEL: vmaxu_vx_v258i8_evl128:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 128
-; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
-; CHECK-NEXT:    vlm.v v24, (a1)
+; CHECK-NEXT:    li a1, 128
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0, v0.t
-; CHECK-NEXT:    vsetivli zero, 0, e8, m8, ta, ma
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    vmaxu.vx v16, v16, a0, v0.t
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
index ea6e1d923e938..fddc981ad3c56 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
@@ -357,18 +357,14 @@ define <256 x i8> @vmin_vx_v258i8_evl129(<256 x i8> %va, i8 %b, <256 x i1> %m) {
   ret <256 x i8> %v
 }
 
-; FIXME: The upper half is doing nothing.
+; The upper half is doing nothing.
 
 define <256 x i8> @vmin_vx_v258i8_evl128(<256 x i8> %va, i8 %b, <256 x i1> %m) {
 ; CHECK-LABEL: vmin_vx_v258i8_evl128:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 128
-; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
-; CHECK-NEXT:    vlm.v v24, (a1)
+; CHECK-NEXT:    li a1, 128
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    vmin.vx v8, v8, a0, v0.t
-; CHECK-NEXT:    vsetivli zero, 0, e8, m8, ta, ma
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    vmin.vx v16, v16, a0, v0.t
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
index ed3cb9d73fa53..83b70b95d2b07 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
@@ -356,18 +356,14 @@ define <256 x i8> @vminu_vx_v258i8_evl129(<256 x i8> %va, i8 %b, <256 x i1> %m)
   ret <256 x i8> %v
 }
 
-; FIXME: The upper half is doing nothing.
+; The upper half is doing nothing.
 
 define <256 x i8> @vminu_vx_v258i8_evl128(<256 x i8> %va, i8 %b, <256 x i1> %m) {
 ; CHECK-LABEL: vminu_vx_v258i8_evl128:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 128
-; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
-; CHECK-NEXT:    vlm.v v24, (a1)
+; CHECK-NEXT:    li a1, 128
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    vminu.vx v8, v8, a0, v0.t
-; CHECK-NEXT:    vsetivli zero, 0, e8, m8, ta, ma
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    vminu.vx v16, v16, a0, v0.t
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
index 081d6bdb05bd3..24414d5410b02 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
@@ -1150,19 +1150,27 @@ define <vscale x 32 x i32> @vmax_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i3
 ; for ISD::VSCALE.
 
 define <vscale x 32 x i32> @vmax_vx_nxv32i32_evl_nx16(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
-; CHECK-LABEL: vmax_vx_nxv32i32_evl_nx16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    srli a2, a1, 2
-; CHECK-NEXT:    vsetvli a3, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v24, v0, a2
-; CHECK-NEXT:    slli a1, a1, 1
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vmax.vx v8, v8, a0, v0.t
-; CHECK-NEXT:    vsetivli zero, 0, e32, m8, ta, ma
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    vmax.vx v16, v16, a0, v0.t
-; CHECK-NEXT:    ret
+; RV32-LABEL: vmax_vx_nxv32i32_evl_nx16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 1
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT:    vmax.vx v8, v8, a0, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vmax_vx_nxv32i32_evl_nx16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    srli a2, a1, 2
+; RV64-NEXT:    vsetvli a3, zero, e8, mf2, ta, ma
+; RV64-NEXT:    vslidedown.vx v24, v0, a2
+; RV64-NEXT:    slli a1, a1, 1
+; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT:    vmax.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsetivli zero, 0, e32, m8, ta, ma
+; RV64-NEXT:    vmv1r.v v0, v24
+; RV64-NEXT:    vmax.vx v16, v16, a0, v0.t
+; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
   %evl = call i32 @llvm.vscale.i32()

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
index 5eb453d80c96a..a7fce573da9fe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
@@ -1149,19 +1149,27 @@ define <vscale x 32 x i32> @vmaxu_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i
 ; for ISD::VSCALE.
 
 define <vscale x 32 x i32> @vmaxu_vx_nxv32i32_evl_nx16(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
-; CHECK-LABEL: vmaxu_vx_nxv32i32_evl_nx16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    srli a2, a1, 2
-; CHECK-NEXT:    vsetvli a3, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v24, v0, a2
-; CHECK-NEXT:    slli a1, a1, 1
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vmaxu.vx v8, v8, a0, v0.t
-; CHECK-NEXT:    vsetivli zero, 0, e32, m8, ta, ma
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    vmaxu.vx v16, v16, a0, v0.t
-; CHECK-NEXT:    ret
+; RV32-LABEL: vmaxu_vx_nxv32i32_evl_nx16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 1
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT:    vmaxu.vx v8, v8, a0, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vmaxu_vx_nxv32i32_evl_nx16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    srli a2, a1, 2
+; RV64-NEXT:    vsetvli a3, zero, e8, mf2, ta, ma
+; RV64-NEXT:    vslidedown.vx v24, v0, a2
+; RV64-NEXT:    slli a1, a1, 1
+; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT:    vmaxu.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsetivli zero, 0, e32, m8, ta, ma
+; RV64-NEXT:    vmv1r.v v0, v24
+; RV64-NEXT:    vmaxu.vx v16, v16, a0, v0.t
+; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
   %evl = call i32 @llvm.vscale.i32()

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
index 329b7b70f3020..ae749e52fcfa8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
@@ -1150,19 +1150,27 @@ define <vscale x 32 x i32> @vmin_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i3
 ; for ISD::VSCALE.
 
 define <vscale x 32 x i32> @vmin_vx_nxv32i32_evl_nx16(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
-; CHECK-LABEL: vmin_vx_nxv32i32_evl_nx16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    srli a2, a1, 2
-; CHECK-NEXT:    vsetvli a3, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v24, v0, a2
-; CHECK-NEXT:    slli a1, a1, 1
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vmin.vx v8, v8, a0, v0.t
-; CHECK-NEXT:    vsetivli zero, 0, e32, m8, ta, ma
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    vmin.vx v16, v16, a0, v0.t
-; CHECK-NEXT:    ret
+; RV32-LABEL: vmin_vx_nxv32i32_evl_nx16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 1
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT:    vmin.vx v8, v8, a0, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vmin_vx_nxv32i32_evl_nx16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    srli a2, a1, 2
+; RV64-NEXT:    vsetvli a3, zero, e8, mf2, ta, ma
+; RV64-NEXT:    vslidedown.vx v24, v0, a2
+; RV64-NEXT:    slli a1, a1, 1
+; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT:    vmin.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsetivli zero, 0, e32, m8, ta, ma
+; RV64-NEXT:    vmv1r.v v0, v24
+; RV64-NEXT:    vmin.vx v16, v16, a0, v0.t
+; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
   %evl = call i32 @llvm.vscale.i32()

diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
index 939a046887f35..d3d5d6ece9b41 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
@@ -1149,19 +1149,27 @@ define <vscale x 32 x i32> @vminu_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i
 ; for ISD::VSCALE.
 
 define <vscale x 32 x i32> @vminu_vx_nxv32i32_evl_nx16(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
-; CHECK-LABEL: vminu_vx_nxv32i32_evl_nx16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    srli a2, a1, 2
-; CHECK-NEXT:    vsetvli a3, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v24, v0, a2
-; CHECK-NEXT:    slli a1, a1, 1
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vminu.vx v8, v8, a0, v0.t
-; CHECK-NEXT:    vsetivli zero, 0, e32, m8, ta, ma
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    vminu.vx v16, v16, a0, v0.t
-; CHECK-NEXT:    ret
+; RV32-LABEL: vminu_vx_nxv32i32_evl_nx16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 1
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT:    vminu.vx v8, v8, a0, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vminu_vx_nxv32i32_evl_nx16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    srli a2, a1, 2
+; RV64-NEXT:    vsetvli a3, zero, e8, mf2, ta, ma
+; RV64-NEXT:    vslidedown.vx v24, v0, a2
+; RV64-NEXT:    slli a1, a1, 1
+; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT:    vminu.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vsetivli zero, 0, e32, m8, ta, ma
+; RV64-NEXT:    vmv1r.v v0, v24
+; RV64-NEXT:    vminu.vx v16, v16, a0, v0.t
+; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
   %evl = call i32 @llvm.vscale.i32()


        

