[llvm] d90a427 - [RISCV] Remove loads from single element fixed vector reduction tests. NFC (#122808)

via llvm-commits <llvm-commits at lists.llvm.org>
Mon Jan 13 19:37:34 PST 2025


Author: Craig Topper
Date: 2025-01-13T19:37:31-08:00
New Revision: d90a42751f9bfa73ed3555c702e70cf34d97bb39

URL: https://github.com/llvm/llvm-project/commit/d90a42751f9bfa73ed3555c702e70cf34d97bb39
DIFF: https://github.com/llvm/llvm-project/commit/d90a42751f9bfa73ed3555c702e70cf34d97bb39.diff

LOG: [RISCV] Remove loads from single element fixed vector reduction tests. NFC (#122808)

These tests weren't interested in the loads. Removing them reduces the
diffs from #122671.
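
The whole change follows one pattern, distilled below from the first hunk of the integer test file: each single-element test now takes its vector operand as a function argument instead of loading it from a pointer, so the checked output no longer has to match the vle* load.

    declare i8 @llvm.vector.reduce.add.v1i8(<1 x i8>)

    ; Before: the operand came from memory, so codegen emitted a vle8.v first.
    define i8 @vreduce_add_v1i8(ptr %x) {
      %v = load <1 x i8>, ptr %x
      %red = call i8 @llvm.vector.reduce.add.v1i8(<1 x i8> %v)
      ret i8 %red
    }

    ; After: only the reduction itself is exercised.
    define i8 @vreduce_add_v1i8(<1 x i8> %v) {
      %red = call i8 @llvm.vector.reduce.add.v1i8(<1 x i8> %v)
      ret i8 %red
    }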

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
index 8bf30f8f0d072b..2b279389253b01 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
@@ -4,29 +4,25 @@
 
 declare half @llvm.vector.reduce.fadd.v1f16(half, <1 x half>)
 
-define half @vreduce_fadd_v1f16(ptr %x, half %s) {
+define half @vreduce_fadd_v1f16(<1 x half> %v, half %s) {
 ; CHECK-LABEL: vreduce_fadd_v1f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vfmv.f.s fa5, v8
 ; CHECK-NEXT:    fadd.h fa0, fa0, fa5
 ; CHECK-NEXT:    ret
-  %v = load <1 x half>, ptr %x
   %red = call reassoc half @llvm.vector.reduce.fadd.v1f16(half %s, <1 x half> %v)
   ret half %red
 }
 
-define half @vreduce_ord_fadd_v1f16(ptr %x, half %s) {
+define half @vreduce_ord_fadd_v1f16(<1 x half> %v, half %s) {
 ; CHECK-LABEL: vreduce_ord_fadd_v1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
 ; CHECK-NEXT:    vfredosum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x half>, ptr %x
   %red = call half @llvm.vector.reduce.fadd.v1f16(half %s, <1 x half> %v)
   ret half %red
 }
@@ -271,61 +267,53 @@ define half @vreduce_ord_fadd_v128f16(ptr %x, half %s) {
 
 declare float @llvm.vector.reduce.fadd.v1f32(float, <1 x float>)
 
-define float @vreduce_fadd_v1f32(ptr %x, float %s) {
+define float @vreduce_fadd_v1f32(<1 x float> %v, float %s) {
 ; CHECK-LABEL: vreduce_fadd_v1f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmv.f.s fa5, v8
 ; CHECK-NEXT:    fadd.s fa0, fa0, fa5
 ; CHECK-NEXT:    ret
-  %v = load <1 x float>, ptr %x
   %red = call reassoc float @llvm.vector.reduce.fadd.v1f32(float %s, <1 x float> %v)
   ret float %red
 }
 
-define float @vreduce_ord_fadd_v1f32(ptr %x, float %s) {
+define float @vreduce_ord_fadd_v1f32(<1 x float> %v, float %s) {
 ; CHECK-LABEL: vreduce_ord_fadd_v1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
 ; CHECK-NEXT:    vfredosum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x float>, ptr %x
   %red = call float @llvm.vector.reduce.fadd.v1f32(float %s, <1 x float> %v)
   ret float %red
 }
 
-define float @vreduce_fwadd_v1f32(ptr %x, float %s) {
+define float @vreduce_fwadd_v1f32(<1 x half> %v, float %s) {
 ; CHECK-LABEL: vreduce_fwadd_v1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmv.f.s fa5, v9
 ; CHECK-NEXT:    fadd.s fa0, fa0, fa5
 ; CHECK-NEXT:    ret
-  %v = load <1 x half>, ptr %x
   %e = fpext <1 x half> %v to <1 x float>
   %red = call reassoc float @llvm.vector.reduce.fadd.v1f32(float %s, <1 x float> %e)
   ret float %red
 }
 
-define float @vreduce_ord_fwadd_v1f32(ptr %x, float %s) {
+define float @vreduce_ord_fwadd_v1f32(<1 x half> %v, float %s) {
 ; CHECK-LABEL: vreduce_ord_fwadd_v1f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfwredosum.vs v8, v8, v9
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x half>, ptr %x
   %e = fpext <1 x half> %v to <1 x float>
   %red = call float @llvm.vector.reduce.fadd.v1f32(float %s, <1 x float> %e)
   ret float %red
@@ -815,61 +803,53 @@ define float @vreduce_ord_fwadd_v64f32(ptr %x, float %s) {
 
 declare double @llvm.vector.reduce.fadd.v1f64(double, <1 x double>)
 
-define double @vreduce_fadd_v1f64(ptr %x, double %s) {
+define double @vreduce_fadd_v1f64(<1 x double> %v, double %s) {
 ; CHECK-LABEL: vreduce_fadd_v1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vfmv.f.s fa5, v8
 ; CHECK-NEXT:    fadd.d fa0, fa0, fa5
 ; CHECK-NEXT:    ret
-  %v = load <1 x double>, ptr %x
   %red = call reassoc double @llvm.vector.reduce.fadd.v1f64(double %s, <1 x double> %v)
   ret double %red
 }
 
-define double @vreduce_ord_fadd_v1f64(ptr %x, double %s) {
+define double @vreduce_ord_fadd_v1f64(<1 x double> %v, double %s) {
 ; CHECK-LABEL: vreduce_ord_fadd_v1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
 ; CHECK-NEXT:    vfredosum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x double>, ptr %x
   %red = call double @llvm.vector.reduce.fadd.v1f64(double %s, <1 x double> %v)
   ret double %red
 }
 
-define double @vreduce_fwadd_v1f64(ptr %x, double %s) {
+define double @vreduce_fwadd_v1f64(<1 x float> %v, double %s) {
 ; CHECK-LABEL: vreduce_fwadd_v1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmv.f.s fa5, v9
 ; CHECK-NEXT:    fadd.d fa0, fa0, fa5
 ; CHECK-NEXT:    ret
-  %v = load <1 x float>, ptr %x
   %e = fpext <1 x float> %v to <1 x double>
   %red = call reassoc double @llvm.vector.reduce.fadd.v1f64(double %s, <1 x double> %e)
   ret double %red
 }
 
-define double @vreduce_ord_fwadd_v1f64(ptr %x, double %s) {
+define double @vreduce_ord_fwadd_v1f64(<1 x float> %v, double %s) {
 ; CHECK-LABEL: vreduce_ord_fwadd_v1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfwredosum.vs v8, v8, v9
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x float>, ptr %x
   %e = fpext <1 x float> %v to <1 x double>
   %red = call double @llvm.vector.reduce.fadd.v1f64(double %s, <1 x double> %e)
   ret double %red

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
index 2ea618bf8a2260..707d1202aca0f9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
@@ -4,14 +4,12 @@
 
 declare i8 @llvm.vector.reduce.add.v1i8(<1 x i8>)
 
-define i8 @vreduce_add_v1i8(ptr %x) {
+define i8 @vreduce_add_v1i8(<1 x i8> %v) {
 ; CHECK-LABEL: vreduce_add_v1i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x i8>, ptr %x
   %red = call i8 @llvm.vector.reduce.add.v1i8(<1 x i8> %v)
   ret i8 %red
 }
@@ -169,41 +167,35 @@ define i8 @vreduce_add_v256i8(ptr %x) {
 
 declare i16 @llvm.vector.reduce.add.v1i16(<1 x i16>)
 
-define i16 @vreduce_add_v1i16(ptr %x) {
+define i16 @vreduce_add_v1i16(<1 x i16> %v) {
 ; CHECK-LABEL: vreduce_add_v1i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x i16>, ptr %x
   %red = call i16 @llvm.vector.reduce.add.v1i16(<1 x i16> %v)
   ret i16 %red
 }
 
-define i16 @vwreduce_add_v1i16(ptr %x) {
+define i16 @vwreduce_add_v1i16(<1 x i8> %v) {
 ; CHECK-LABEL: vwreduce_add_v1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsext.vf2 v9, v8
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
-  %v = load <1 x i8>, ptr %x
   %e = sext <1 x i8> %v to <1 x i16>
   %red = call i16 @llvm.vector.reduce.add.v1i16(<1 x i16> %e)
   ret i16 %red
 }
 
-define i16 @vwreduce_uadd_v1i16(ptr %x) {
+define i16 @vwreduce_uadd_v1i16(<1 x i8> %v) {
 ; CHECK-LABEL: vwreduce_uadd_v1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vzext.vf2 v9, v8
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
-  %v = load <1 x i8>, ptr %x
   %e = zext <1 x i8> %v to <1 x i16>
   %red = call i16 @llvm.vector.reduce.add.v1i16(<1 x i16> %e)
   ret i16 %red
@@ -581,41 +573,35 @@ define i16 @vwreduce_uadd_v128i16(ptr %x) {
 
 declare i32 @llvm.vector.reduce.add.v1i32(<1 x i32>)
 
-define i32 @vreduce_add_v1i32(ptr %x) {
+define i32 @vreduce_add_v1i32(<1 x i32> %v) {
 ; CHECK-LABEL: vreduce_add_v1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x i32>, ptr %x
   %red = call i32 @llvm.vector.reduce.add.v1i32(<1 x i32> %v)
   ret i32 %red
 }
 
-define i32 @vwreduce_add_v1i32(ptr %x) {
+define i32 @vwreduce_add_v1i32(<1 x i16> %v) {
 ; CHECK-LABEL: vwreduce_add_v1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vsext.vf2 v9, v8
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
-  %v = load <1 x i16>, ptr %x
   %e = sext <1 x i16> %v to <1 x i32>
   %red = call i32 @llvm.vector.reduce.add.v1i32(<1 x i32> %e)
   ret i32 %red
 }
 
-define i32 @vwreduce_uadd_v1i32(ptr %x) {
+define i32 @vwreduce_uadd_v1i32(<1 x i16> %v) {
 ; CHECK-LABEL: vwreduce_uadd_v1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vzext.vf2 v9, v8
 ; CHECK-NEXT:    vmv.x.s a0, v9
 ; CHECK-NEXT:    ret
-  %v = load <1 x i16>, ptr %x
   %e = zext <1 x i16> %v to <1 x i32>
   %red = call i32 @llvm.vector.reduce.add.v1i32(<1 x i32> %e)
   ret i32 %red
@@ -940,12 +926,11 @@ define i32 @vwreduce_uadd_v64i32(ptr %x) {
 
 declare i64 @llvm.vector.reduce.add.v1i64(<1 x i64>)
 
-define i64 @vreduce_add_v1i64(ptr %x) {
+define i64 @vreduce_add_v1i64(<1 x i64> %v) {
 ; RV32-LABEL: vreduce_add_v1i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vle64.v v8, (a0)
 ; RV32-NEXT:    li a0, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-NEXT:    vmv.x.s a1, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
@@ -954,21 +939,18 @@ define i64 @vreduce_add_v1i64(ptr %x) {
 ; RV64-LABEL: vreduce_add_v1i64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:    vmv.x.s a0, v8
 ; RV64-NEXT:    ret
-  %v = load <1 x i64>, ptr %x
   %red = call i64 @llvm.vector.reduce.add.v1i64(<1 x i64> %v)
   ret i64 %red
 }
 
-define i64 @vwreduce_add_v1i64(ptr %x) {
+define i64 @vwreduce_add_v1i64(<1 x i32> %v) {
 ; RV32-LABEL: vwreduce_add_v1i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vle32.v v8, (a0)
-; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsext.vf2 v9, v8
+; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsrl.vx v8, v9, a0
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    vmv.x.s a0, v9
@@ -977,23 +959,20 @@ define i64 @vwreduce_add_v1i64(ptr %x) {
 ; RV64-LABEL: vwreduce_add_v1i64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT:    vle32.v v8, (a0)
 ; RV64-NEXT:    vsext.vf2 v9, v8
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
-  %v = load <1 x i32>, ptr %x
   %e = sext <1 x i32> %v to <1 x i64>
   %red = call i64 @llvm.vector.reduce.add.v1i64(<1 x i64> %e)
   ret i64 %red
 }
 
-define i64 @vwreduce_uadd_v1i64(ptr %x) {
+define i64 @vwreduce_uadd_v1i64(<1 x i32> %v) {
 ; RV32-LABEL: vwreduce_uadd_v1i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vle32.v v8, (a0)
-; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vzext.vf2 v9, v8
+; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsrl.vx v8, v9, a0
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    vmv.x.s a0, v9
@@ -1002,11 +981,9 @@ define i64 @vwreduce_uadd_v1i64(ptr %x) {
 ; RV64-LABEL: vwreduce_uadd_v1i64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT:    vle32.v v8, (a0)
 ; RV64-NEXT:    vzext.vf2 v9, v8
 ; RV64-NEXT:    vmv.x.s a0, v9
 ; RV64-NEXT:    ret
-  %v = load <1 x i32>, ptr %x
   %e = zext <1 x i32> %v to <1 x i64>
   %red = call i64 @llvm.vector.reduce.add.v1i64(<1 x i64> %e)
   ret i64 %red
@@ -1670,14 +1647,12 @@ define i64 @vwreduce_uadd_v64i64(ptr %x) {
 
 declare i8 @llvm.vector.reduce.and.v1i8(<1 x i8>)
 
-define i8 @vreduce_and_v1i8(ptr %x) {
+define i8 @vreduce_and_v1i8(<1 x i8> %v) {
 ; CHECK-LABEL: vreduce_and_v1i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x i8>, ptr %x
   %red = call i8 @llvm.vector.reduce.and.v1i8(<1 x i8> %v)
   ret i8 %red
 }
@@ -1829,14 +1804,12 @@ define i8 @vreduce_and_v256i8(ptr %x) {
 
 declare i16 @llvm.vector.reduce.and.v1i16(<1 x i16>)
 
-define i16 @vreduce_and_v1i16(ptr %x) {
+define i16 @vreduce_and_v1i16(<1 x i16> %v) {
 ; CHECK-LABEL: vreduce_and_v1i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x i16>, ptr %x
   %red = call i16 @llvm.vector.reduce.and.v1i16(<1 x i16> %v)
   ret i16 %red
 }
@@ -1954,14 +1927,12 @@ define i16 @vreduce_and_v128i16(ptr %x) {
 
 declare i32 @llvm.vector.reduce.and.v1i32(<1 x i32>)
 
-define i32 @vreduce_and_v1i32(ptr %x) {
+define i32 @vreduce_and_v1i32(<1 x i32> %v) {
 ; CHECK-LABEL: vreduce_and_v1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x i32>, ptr %x
   %red = call i32 @llvm.vector.reduce.and.v1i32(<1 x i32> %v)
   ret i32 %red
 }
@@ -2063,12 +2034,11 @@ define i32 @vreduce_and_v64i32(ptr %x) {
 
 declare i64 @llvm.vector.reduce.and.v1i64(<1 x i64>)
 
-define i64 @vreduce_and_v1i64(ptr %x) {
+define i64 @vreduce_and_v1i64(<1 x i64> %v) {
 ; RV32-LABEL: vreduce_and_v1i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vle64.v v8, (a0)
 ; RV32-NEXT:    li a0, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-NEXT:    vmv.x.s a1, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
@@ -2077,10 +2047,8 @@ define i64 @vreduce_and_v1i64(ptr %x) {
 ; RV64-LABEL: vreduce_and_v1i64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:    vmv.x.s a0, v8
 ; RV64-NEXT:    ret
-  %v = load <1 x i64>, ptr %x
   %red = call i64 @llvm.vector.reduce.and.v1i64(<1 x i64> %v)
   ret i64 %red
 }
@@ -2273,14 +2241,12 @@ define i64 @vreduce_and_v64i64(ptr %x) nounwind {
 
 declare i8 @llvm.vector.reduce.or.v1i8(<1 x i8>)
 
-define i8 @vreduce_or_v1i8(ptr %x) {
+define i8 @vreduce_or_v1i8(<1 x i8> %v) {
 ; CHECK-LABEL: vreduce_or_v1i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x i8>, ptr %x
   %red = call i8 @llvm.vector.reduce.or.v1i8(<1 x i8> %v)
   ret i8 %red
 }
@@ -2430,14 +2396,12 @@ define i8 @vreduce_or_v256i8(ptr %x) {
 
 declare i16 @llvm.vector.reduce.or.v1i16(<1 x i16>)
 
-define i16 @vreduce_or_v1i16(ptr %x) {
+define i16 @vreduce_or_v1i16(<1 x i16> %v) {
 ; CHECK-LABEL: vreduce_or_v1i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x i16>, ptr %x
   %red = call i16 @llvm.vector.reduce.or.v1i16(<1 x i16> %v)
   ret i16 %red
 }
@@ -2555,14 +2519,12 @@ define i16 @vreduce_or_v128i16(ptr %x) {
 
 declare i32 @llvm.vector.reduce.or.v1i32(<1 x i32>)
 
-define i32 @vreduce_or_v1i32(ptr %x) {
+define i32 @vreduce_or_v1i32(<1 x i32> %v) {
 ; CHECK-LABEL: vreduce_or_v1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x i32>, ptr %x
   %red = call i32 @llvm.vector.reduce.or.v1i32(<1 x i32> %v)
   ret i32 %red
 }
@@ -2664,12 +2626,11 @@ define i32 @vreduce_or_v64i32(ptr %x) {
 
 declare i64 @llvm.vector.reduce.or.v1i64(<1 x i64>)
 
-define i64 @vreduce_or_v1i64(ptr %x) {
+define i64 @vreduce_or_v1i64(<1 x i64> %v) {
 ; RV32-LABEL: vreduce_or_v1i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vle64.v v8, (a0)
 ; RV32-NEXT:    li a0, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-NEXT:    vmv.x.s a1, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
@@ -2678,10 +2639,8 @@ define i64 @vreduce_or_v1i64(ptr %x) {
 ; RV64-LABEL: vreduce_or_v1i64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:    vmv.x.s a0, v8
 ; RV64-NEXT:    ret
-  %v = load <1 x i64>, ptr %x
   %red = call i64 @llvm.vector.reduce.or.v1i64(<1 x i64> %v)
   ret i64 %red
 }
@@ -2874,14 +2833,12 @@ define i64 @vreduce_or_v64i64(ptr %x) nounwind {
 
 declare i8 @llvm.vector.reduce.xor.v1i8(<1 x i8>)
 
-define i8 @vreduce_xor_v1i8(ptr %x) {
+define i8 @vreduce_xor_v1i8(<1 x i8> %v) {
 ; CHECK-LABEL: vreduce_xor_v1i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x i8>, ptr %x
   %red = call i8 @llvm.vector.reduce.xor.v1i8(<1 x i8> %v)
   ret i8 %red
 }
@@ -3039,14 +2996,12 @@ define i8 @vreduce_xor_v256i8(ptr %x) {
 
 declare i16 @llvm.vector.reduce.xor.v1i16(<1 x i16>)
 
-define i16 @vreduce_xor_v1i16(ptr %x) {
+define i16 @vreduce_xor_v1i16(<1 x i16> %v) {
 ; CHECK-LABEL: vreduce_xor_v1i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x i16>, ptr %x
   %red = call i16 @llvm.vector.reduce.xor.v1i16(<1 x i16> %v)
   ret i16 %red
 }
@@ -3171,14 +3126,12 @@ define i16 @vreduce_xor_v128i16(ptr %x) {
 
 declare i32 @llvm.vector.reduce.xor.v1i32(<1 x i32>)
 
-define i32 @vreduce_xor_v1i32(ptr %x) {
+define i32 @vreduce_xor_v1i32(<1 x i32> %v) {
 ; CHECK-LABEL: vreduce_xor_v1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x i32>, ptr %x
   %red = call i32 @llvm.vector.reduce.xor.v1i32(<1 x i32> %v)
   ret i32 %red
 }
@@ -3286,12 +3239,11 @@ define i32 @vreduce_xor_v64i32(ptr %x) {
 
 declare i64 @llvm.vector.reduce.xor.v1i64(<1 x i64>)
 
-define i64 @vreduce_xor_v1i64(ptr %x) {
+define i64 @vreduce_xor_v1i64(<1 x i64> %v) {
 ; RV32-LABEL: vreduce_xor_v1i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vle64.v v8, (a0)
 ; RV32-NEXT:    li a0, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-NEXT:    vmv.x.s a1, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
@@ -3300,10 +3252,8 @@ define i64 @vreduce_xor_v1i64(ptr %x) {
 ; RV64-LABEL: vreduce_xor_v1i64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:    vmv.x.s a0, v8
 ; RV64-NEXT:    ret
-  %v = load <1 x i64>, ptr %x
   %red = call i64 @llvm.vector.reduce.xor.v1i64(<1 x i64> %v)
   ret i64 %red
 }
@@ -3508,14 +3458,12 @@ define i64 @vreduce_xor_v64i64(ptr %x) nounwind {
 
 declare i8 @llvm.vector.reduce.smin.v1i8(<1 x i8>)
 
-define i8 @vreduce_smin_v1i8(ptr %x) {
+define i8 @vreduce_smin_v1i8(<1 x i8> %v) {
 ; CHECK-LABEL: vreduce_smin_v1i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x i8>, ptr %x
   %red = call i8 @llvm.vector.reduce.smin.v1i8(<1 x i8> %v)
   ret i8 %red
 }
@@ -3666,14 +3614,12 @@ define i8 @vreduce_smin_v256i8(ptr %x) {
 
 declare i16 @llvm.vector.reduce.smin.v1i16(<1 x i16>)
 
-define i16 @vreduce_smin_v1i16(ptr %x) {
+define i16 @vreduce_smin_v1i16(<1 x i16> %v) {
 ; CHECK-LABEL: vreduce_smin_v1i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x i16>, ptr %x
   %red = call i16 @llvm.vector.reduce.smin.v1i16(<1 x i16> %v)
   ret i16 %red
 }
@@ -3791,14 +3737,12 @@ define i16 @vreduce_smin_v128i16(ptr %x) {
 
 declare i32 @llvm.vector.reduce.smin.v1i32(<1 x i32>)
 
-define i32 @vreduce_smin_v1i32(ptr %x) {
+define i32 @vreduce_smin_v1i32(<1 x i32> %v) {
 ; CHECK-LABEL: vreduce_smin_v1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x i32>, ptr %x
   %red = call i32 @llvm.vector.reduce.smin.v1i32(<1 x i32> %v)
   ret i32 %red
 }
@@ -3900,12 +3844,11 @@ define i32 @vreduce_smin_v64i32(ptr %x) {
 
 declare i64 @llvm.vector.reduce.smin.v1i64(<1 x i64>)
 
-define i64 @vreduce_smin_v1i64(ptr %x) {
+define i64 @vreduce_smin_v1i64(<1 x i64> %v) {
 ; RV32-LABEL: vreduce_smin_v1i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vle64.v v8, (a0)
 ; RV32-NEXT:    li a0, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-NEXT:    vmv.x.s a1, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
@@ -3914,10 +3857,8 @@ define i64 @vreduce_smin_v1i64(ptr %x) {
 ; RV64-LABEL: vreduce_smin_v1i64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:    vmv.x.s a0, v8
 ; RV64-NEXT:    ret
-  %v = load <1 x i64>, ptr %x
   %red = call i64 @llvm.vector.reduce.smin.v1i64(<1 x i64> %v)
   ret i64 %red
 }
@@ -4110,14 +4051,12 @@ define i64 @vreduce_smin_v64i64(ptr %x) nounwind {
 
 declare i8 @llvm.vector.reduce.smax.v1i8(<1 x i8>)
 
-define i8 @vreduce_smax_v1i8(ptr %x) {
+define i8 @vreduce_smax_v1i8(<1 x i8> %v) {
 ; CHECK-LABEL: vreduce_smax_v1i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x i8>, ptr %x
   %red = call i8 @llvm.vector.reduce.smax.v1i8(<1 x i8> %v)
   ret i8 %red
 }
@@ -4268,14 +4207,12 @@ define i8 @vreduce_smax_v256i8(ptr %x) {
 
 declare i16 @llvm.vector.reduce.smax.v1i16(<1 x i16>)
 
-define i16 @vreduce_smax_v1i16(ptr %x) {
+define i16 @vreduce_smax_v1i16(<1 x i16> %v) {
 ; CHECK-LABEL: vreduce_smax_v1i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x i16>, ptr %x
   %red = call i16 @llvm.vector.reduce.smax.v1i16(<1 x i16> %v)
   ret i16 %red
 }
@@ -4393,14 +4330,12 @@ define i16 @vreduce_smax_v128i16(ptr %x) {
 
 declare i32 @llvm.vector.reduce.smax.v1i32(<1 x i32>)
 
-define i32 @vreduce_smax_v1i32(ptr %x) {
+define i32 @vreduce_smax_v1i32(<1 x i32> %v) {
 ; CHECK-LABEL: vreduce_smax_v1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x i32>, ptr %x
   %red = call i32 @llvm.vector.reduce.smax.v1i32(<1 x i32> %v)
   ret i32 %red
 }
@@ -4502,12 +4437,11 @@ define i32 @vreduce_smax_v64i32(ptr %x) {
 
 declare i64 @llvm.vector.reduce.smax.v1i64(<1 x i64>)
 
-define i64 @vreduce_smax_v1i64(ptr %x) {
+define i64 @vreduce_smax_v1i64(<1 x i64> %v) {
 ; RV32-LABEL: vreduce_smax_v1i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vle64.v v8, (a0)
 ; RV32-NEXT:    li a0, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-NEXT:    vmv.x.s a1, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
@@ -4516,10 +4450,8 @@ define i64 @vreduce_smax_v1i64(ptr %x) {
 ; RV64-LABEL: vreduce_smax_v1i64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:    vmv.x.s a0, v8
 ; RV64-NEXT:    ret
-  %v = load <1 x i64>, ptr %x
   %red = call i64 @llvm.vector.reduce.smax.v1i64(<1 x i64> %v)
   ret i64 %red
 }
@@ -4712,14 +4644,12 @@ define i64 @vreduce_smax_v64i64(ptr %x) nounwind {
 
 declare i8 @llvm.vector.reduce.umin.v1i8(<1 x i8>)
 
-define i8 @vreduce_umin_v1i8(ptr %x) {
+define i8 @vreduce_umin_v1i8(<1 x i8> %v) {
 ; CHECK-LABEL: vreduce_umin_v1i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x i8>, ptr %x
   %red = call i8 @llvm.vector.reduce.umin.v1i8(<1 x i8> %v)
   ret i8 %red
 }
@@ -4870,14 +4800,12 @@ define i8 @vreduce_umin_v256i8(ptr %x) {
 
 declare i16 @llvm.vector.reduce.umin.v1i16(<1 x i16>)
 
-define i16 @vreduce_umin_v1i16(ptr %x) {
+define i16 @vreduce_umin_v1i16(<1 x i16> %v) {
 ; CHECK-LABEL: vreduce_umin_v1i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x i16>, ptr %x
   %red = call i16 @llvm.vector.reduce.umin.v1i16(<1 x i16> %v)
   ret i16 %red
 }
@@ -4995,14 +4923,12 @@ define i16 @vreduce_umin_v128i16(ptr %x) {
 
 declare i32 @llvm.vector.reduce.umin.v1i32(<1 x i32>)
 
-define i32 @vreduce_umin_v1i32(ptr %x) {
+define i32 @vreduce_umin_v1i32(<1 x i32> %v) {
 ; CHECK-LABEL: vreduce_umin_v1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x i32>, ptr %x
   %red = call i32 @llvm.vector.reduce.umin.v1i32(<1 x i32> %v)
   ret i32 %red
 }
@@ -5104,12 +5030,11 @@ define i32 @vreduce_umin_v64i32(ptr %x) {
 
 declare i64 @llvm.vector.reduce.umin.v1i64(<1 x i64>)
 
-define i64 @vreduce_umin_v1i64(ptr %x) {
+define i64 @vreduce_umin_v1i64(<1 x i64> %v) {
 ; RV32-LABEL: vreduce_umin_v1i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vle64.v v8, (a0)
 ; RV32-NEXT:    li a0, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-NEXT:    vmv.x.s a1, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
@@ -5118,10 +5043,8 @@ define i64 @vreduce_umin_v1i64(ptr %x) {
 ; RV64-LABEL: vreduce_umin_v1i64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:    vmv.x.s a0, v8
 ; RV64-NEXT:    ret
-  %v = load <1 x i64>, ptr %x
   %red = call i64 @llvm.vector.reduce.umin.v1i64(<1 x i64> %v)
   ret i64 %red
 }
@@ -5314,14 +5237,12 @@ define i64 @vreduce_umin_v64i64(ptr %x) nounwind {
 
 declare i8 @llvm.vector.reduce.umax.v1i8(<1 x i8>)
 
-define i8 @vreduce_umax_v1i8(ptr %x) {
+define i8 @vreduce_umax_v1i8(<1 x i8> %v) {
 ; CHECK-LABEL: vreduce_umax_v1i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x i8>, ptr %x
   %red = call i8 @llvm.vector.reduce.umax.v1i8(<1 x i8> %v)
   ret i8 %red
 }
@@ -5471,14 +5392,12 @@ define i8 @vreduce_umax_v256i8(ptr %x) {
 
 declare i16 @llvm.vector.reduce.umax.v1i16(<1 x i16>)
 
-define i16 @vreduce_umax_v1i16(ptr %x) {
+define i16 @vreduce_umax_v1i16(<1 x i16> %v) {
 ; CHECK-LABEL: vreduce_umax_v1i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x i16>, ptr %x
   %red = call i16 @llvm.vector.reduce.umax.v1i16(<1 x i16> %v)
   ret i16 %red
 }
@@ -5596,14 +5515,12 @@ define i16 @vreduce_umax_v128i16(ptr %x) {
 
 declare i32 @llvm.vector.reduce.umax.v1i32(<1 x i32>)
 
-define i32 @vreduce_umax_v1i32(ptr %x) {
+define i32 @vreduce_umax_v1i32(<1 x i32> %v) {
 ; CHECK-LABEL: vreduce_umax_v1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x i32>, ptr %x
   %red = call i32 @llvm.vector.reduce.umax.v1i32(<1 x i32> %v)
   ret i32 %red
 }
@@ -5705,12 +5622,11 @@ define i32 @vreduce_umax_v64i32(ptr %x) {
 
 declare i64 @llvm.vector.reduce.umax.v1i64(<1 x i64>)
 
-define i64 @vreduce_umax_v1i64(ptr %x) {
+define i64 @vreduce_umax_v1i64(<1 x i64> %v) {
 ; RV32-LABEL: vreduce_umax_v1i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vle64.v v8, (a0)
 ; RV32-NEXT:    li a0, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-NEXT:    vmv.x.s a1, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
@@ -5719,10 +5635,8 @@ define i64 @vreduce_umax_v1i64(ptr %x) {
 ; RV64-LABEL: vreduce_umax_v1i64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:    vmv.x.s a0, v8
 ; RV64-NEXT:    ret
-  %v = load <1 x i64>, ptr %x
   %red = call i64 @llvm.vector.reduce.umax.v1i64(<1 x i64> %v)
   ret i64 %red
 }
@@ -5915,14 +5829,12 @@ define i64 @vreduce_umax_v64i64(ptr %x) nounwind {
 
 declare i8 @llvm.vector.reduce.mul.v1i8(<1 x i8>)
 
-define i8 @vreduce_mul_v1i8(ptr %x) {
+define i8 @vreduce_mul_v1i8(<1 x i8> %v) {
 ; CHECK-LABEL: vreduce_mul_v1i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x i8>, ptr %x
   %red = call i8 @llvm.vector.reduce.mul.v1i8(<1 x i8> %v)
   ret i8 %red
 }
@@ -6147,14 +6059,12 @@ define i8 @vreduce_mul_v256i8(ptr %x) {
 
 declare i16 @llvm.vector.reduce.mul.v1i16(<1 x i16>)
 
-define i16 @vreduce_mul_v1i16(ptr %x) {
+define i16 @vreduce_mul_v1i16(<1 x i16> %v) {
 ; CHECK-LABEL: vreduce_mul_v1i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x i16>, ptr %x
   %red = call i16 @llvm.vector.reduce.mul.v1i16(<1 x i16> %v)
   ret i16 %red
 }
@@ -6321,14 +6231,12 @@ define i16 @vreduce_mul_v128i16(ptr %x) {
 
 declare i32 @llvm.vector.reduce.mul.v1i32(<1 x i32>)
 
-define i32 @vreduce_mul_v1i32(ptr %x) {
+define i32 @vreduce_mul_v1i32(<1 x i32> %v) {
 ; CHECK-LABEL: vreduce_mul_v1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
-  %v = load <1 x i32>, ptr %x
   %red = call i32 @llvm.vector.reduce.mul.v1i32(<1 x i32> %v)
   ret i32 %red
 }
@@ -6464,12 +6372,11 @@ define i32 @vreduce_mul_v64i32(ptr %x) {
 
 declare i64 @llvm.vector.reduce.mul.v1i64(<1 x i64>)
 
-define i64 @vreduce_mul_v1i64(ptr %x) {
+define i64 @vreduce_mul_v1i64(<1 x i64> %v) {
 ; RV32-LABEL: vreduce_mul_v1i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vle64.v v8, (a0)
 ; RV32-NEXT:    li a0, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-NEXT:    vmv.x.s a1, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
@@ -6478,10 +6385,8 @@ define i64 @vreduce_mul_v1i64(ptr %x) {
 ; RV64-LABEL: vreduce_mul_v1i64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT:    vle64.v v8, (a0)
 ; RV64-NEXT:    vmv.x.s a0, v8
 ; RV64-NEXT:    ret
-  %v = load <1 x i64>, ptr %x
   %red = call i64 @llvm.vector.reduce.mul.v1i64(<1 x i64> %v)
   ret i64 %red
 }


        

