[llvm] abe1448 - [SVE] Fix VBITS_GE_256 typo in fixed-width tests.

Cameron McInally via llvm-commits <llvm-commits at lists.llvm.org>
Mon Oct 12 12:33:08 PDT 2020


Author: Cameron McInally
Date: 2020-10-12T14:31:34-05:00
New Revision: abe14485fed7741ef5771d11509373c861ec37eb

URL: https://github.com/llvm/llvm-project/commit/abe14485fed7741ef5771d11509373c861ec37eb
DIFF: https://github.com/llvm/llvm-project/commit/abe14485fed7741ef5771d11509373c861ec37eb.diff

LOG: [SVE] Fix VBITS_GE_256 typo in fixed-width tests.

This seems to be a typo that propagated to a number of tests: the affected files check a VBITS_GE_256 prefix that no RUN line defines, so FileCheck never verified those lines. Replace VBITS_GE_256 with CHECK.
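For context, FileCheck only acts on directives whose prefix is enabled via -check-prefixes on a RUN line; a directive carrying any other prefix is treated as an ordinary comment and silently skipped rather than diagnosed. A minimal sketch of the failure mode (the RUN line below is illustrative, not copied from these tests):

  ; RUN: llc -mattr=+sve -aarch64-sve-vector-bits-min=512 < %s \
  ; RUN:   | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512

  ; Verified, because CHECK is an enabled prefix:
  ; CHECK: ptrue p0.h, vl16

  ; Silently ignored, because no RUN line enables VBITS_GE_256:
  ; VBITS_GE_256: ptrue p0.h, vl16

So until renamed to CHECK, the mistyped lines provided no test coverage at all.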

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/sve-fixed-length-fp-reduce.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-fp-select.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-int-div.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-int-reduce.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-int-select.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-log-reduce.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-trunc.ll

Removed: 
    


################################################################################
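A reading aid for the hunks below: patterns such as [[PG:p[0-9]+]] are FileCheck pattern variables. The first occurrence captures whatever the regex matches (here, a predicate register), and later bare uses such as [[PG]] must match the same captured text. A hypothetical two-line example:

  ; CHECK: ptrue [[PG:p[0-9]+]].h, vl16    <- captures e.g. "p0"
  ; CHECK-NEXT: fmaxnmv h0, [[PG]], z0.h   <- must reuse that same "p0"

This is why renaming the prefix is the whole fix: the patterns themselves were already correct, they were simply never run.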
diff  --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-reduce.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-reduce.ll
index e38d18a9e463..6991c0ad3a68 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-reduce.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-reduce.ll
@@ -44,10 +44,10 @@ define half @fmaxv_v8f16(<8 x half> %a) #0 {
 
 define half @fmaxv_v16f16(<16 x half>* %a) #0 {
 ; CHECK-LABEL: fmaxv_v16f16:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].h, vl16
-; VBITS_GE_256-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: fmaxnmv h0, [[PG]], [[OP]].h
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
+; CHECK-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK-NEXT: fmaxnmv h0, [[PG]], [[OP]].h
+; CHECK-NEXT: ret
   %op = load <16 x half>, <16 x half>* %a
   %res = call half @llvm.vector.reduce.fmax.v16f16(<16 x half> %op)
   ret half %res
@@ -115,10 +115,10 @@ define float @fmaxv_v4f32(<4 x float> %a) #0 {
 
 define float @fmaxv_v8f32(<8 x float>* %a) #0 {
 ; CHECK-LABEL: fmaxv_v8f32:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].s, vl8
-; VBITS_GE_256-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: fmaxnmv s0, [[PG]], [[OP]].s
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl8
+; CHECK-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK-NEXT: fmaxnmv s0, [[PG]], [[OP]].s
+; CHECK-NEXT: ret
   %op = load <8 x float>, <8 x float>* %a
   %res = call float @llvm.vector.reduce.fmax.v8f32(<8 x float> %op)
   ret float %res
@@ -186,10 +186,10 @@ define double @fmaxv_v2f64(<2 x double> %a) #0 {
 
 define double @fmaxv_v4f64(<4 x double>* %a) #0 {
 ; CHECK-LABEL: fmaxv_v4f64:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].d, vl4
-; VBITS_GE_256-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: fmaxnmv d0, [[PG]], [[OP]].d
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl4
+; CHECK-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK-NEXT: fmaxnmv d0, [[PG]], [[OP]].d
+; CHECK-NEXT: ret
   %op = load <4 x double>, <4 x double>* %a
   %res = call double @llvm.vector.reduce.fmax.v4f64(<4 x double> %op)
   ret double %res
@@ -261,10 +261,10 @@ define half @fminv_v8f16(<8 x half> %a) #0 {
 
 define half @fminv_v16f16(<16 x half>* %a) #0 {
 ; CHECK-LABEL: fminv_v16f16:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].h, vl16
-; VBITS_GE_256-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: fminnmv h0, [[PG]], [[OP]].h
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
+; CHECK-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK-NEXT: fminnmv h0, [[PG]], [[OP]].h
+; CHECK-NEXT: ret
   %op = load <16 x half>, <16 x half>* %a
   %res = call half @llvm.vector.reduce.fmin.v16f16(<16 x half> %op)
   ret half %res
@@ -332,10 +332,10 @@ define float @fminv_v4f32(<4 x float> %a) #0 {
 
 define float @fminv_v8f32(<8 x float>* %a) #0 {
 ; CHECK-LABEL: fminv_v8f32:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].s, vl8
-; VBITS_GE_256-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: fminnmv s0, [[PG]], [[OP]].s
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl8
+; CHECK-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK-NEXT: fminnmv s0, [[PG]], [[OP]].s
+; CHECK-NEXT: ret
   %op = load <8 x float>, <8 x float>* %a
   %res = call float @llvm.vector.reduce.fmin.v8f32(<8 x float> %op)
   ret float %res
@@ -403,10 +403,10 @@ define double @fminv_v2f64(<2 x double> %a) #0 {
 
 define double @fminv_v4f64(<4 x double>* %a) #0 {
 ; CHECK-LABEL: fminv_v4f64:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].d, vl4
-; VBITS_GE_256-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: fminnmv d0, [[PG]], [[OP]].d
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl4
+; CHECK-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK-NEXT: fminnmv d0, [[PG]], [[OP]].d
+; CHECK-NEXT: ret
   %op = load <4 x double>, <4 x double>* %a
   %res = call double @llvm.vector.reduce.fmin.v4f64(<4 x double> %op)
   ret double %res

diff  --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-select.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-select.ll
index 1570ea2db771..5318d37253e4 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-select.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-select.ll
@@ -42,14 +42,14 @@ define void @select_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i1>* %c) #0 {
 ; CHECK-LABEL: select_v16f16:
 ; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),16)]]
 ; CHECK: ptrue [[PG1:p[0-9]+]].h
-; VBITS_GE_256: ld1h { [[MASK:z[0-9]+]].h }, [[PG]]/z, [x9]
-; VBITS_GE_256-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_GE_256-NEXT: and [[AND:z[0-9]+]].h, [[MASK]].h, #0x1
-; VBITS_GE_256-NEXT: cmpne [[COND:p[0-9]+]].h, [[PG1]]/z, [[AND]].h, #0
-; VBITS_GE_256-NEXT: sel [[RES:z[0-9]+]].h, [[COND]], [[OP1]].h, [[OP2]].h
-; VBITS_GE_256-NEXT: st1h { [[RES]].h }, [[PG]], [x0]
-; VBITS_GE_256: ret
+; CHECK: ld1h { [[MASK:z[0-9]+]].h }, [[PG]]/z, [x9]
+; CHECK-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
+; CHECK-NEXT: and [[AND:z[0-9]+]].h, [[MASK]].h, #0x1
+; CHECK-NEXT: cmpne [[COND:p[0-9]+]].h, [[PG1]]/z, [[AND]].h, #0
+; CHECK-NEXT: sel [[RES:z[0-9]+]].h, [[COND]], [[OP1]].h, [[OP2]].h
+; CHECK-NEXT: st1h { [[RES]].h }, [[PG]], [x0]
+; CHECK: ret
   %mask = load <16 x i1>, <16 x i1>* %c
   %op1 = load <16 x half>, <16 x half>* %a
   %op2 = load <16 x half>, <16 x half>* %b
@@ -140,14 +140,14 @@ define void @select_v8f32(<8 x float>* %a, <8 x float>* %b, <8 x i1>* %c) #0 {
 ; CHECK-LABEL: select_v8f32:
 ; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),8)]]
 ; CHECK: ptrue [[PG1:p[0-9]+]].s
-; VBITS_GE_256: ld1w { [[MASK:z[0-9]+]].s }, [[PG]]/z, [x9]
-; VBITS_GE_256-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_GE_256-NEXT: and [[AND:z[0-9]+]].s, [[MASK]].s, #0x1
-; VBITS_GE_256-NEXT: cmpne [[COND:p[0-9]+]].s, [[PG1]]/z, [[AND]].s, #0
-; VBITS_GE_256-NEXT: sel [[RES:z[0-9]+]].s, [[COND]], [[OP1]].s, [[OP2]].s
-; VBITS_GE_256-NEXT: st1w { [[RES]].s }, [[PG]], [x0]
-; VBITS_GE_256: ret
+; CHECK: ld1w { [[MASK:z[0-9]+]].s }, [[PG]]/z, [x9]
+; CHECK-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
+; CHECK-NEXT: and [[AND:z[0-9]+]].s, [[MASK]].s, #0x1
+; CHECK-NEXT: cmpne [[COND:p[0-9]+]].s, [[PG1]]/z, [[AND]].s, #0
+; CHECK-NEXT: sel [[RES:z[0-9]+]].s, [[COND]], [[OP1]].s, [[OP2]].s
+; CHECK-NEXT: st1w { [[RES]].s }, [[PG]], [x0]
+; CHECK: ret
   %mask = load <8 x i1>, <8 x i1>* %c
   %op1 = load <8 x float>, <8 x float>* %a
   %op2 = load <8 x float>, <8 x float>* %b
@@ -238,14 +238,14 @@ define void @select_v4f64(<4 x double>* %a, <4 x double>* %b, <4 x i1>* %c) #0 {
 ; CHECK-LABEL: select_v4f64:
 ; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),4)]]
 ; CHECK: ptrue [[PG1:p[0-9]+]].d
-; VBITS_GE_256: ld1d { [[MASK:z[0-9]+]].d }, [[PG]]/z, [x9]
-; VBITS_GE_256-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_256-NEXT: and [[AND:z[0-9]+]].d, [[MASK]].d, #0x1
-; VBITS_GE_256-NEXT: cmpne [[COND:p[0-9]+]].d, [[PG1]]/z, [[AND]].d, #0
-; VBITS_GE_256-NEXT: sel [[RES:z[0-9]+]].d, [[COND]], [[OP1]].d, [[OP2]].d
-; VBITS_GE_256-NEXT: st1d { [[RES]].d }, [[PG]], [x0]
-; VBITS_GE_256: ret
+; CHECK: ld1d { [[MASK:z[0-9]+]].d }, [[PG]]/z, [x9]
+; CHECK-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
+; CHECK-NEXT: and [[AND:z[0-9]+]].d, [[MASK]].d, #0x1
+; CHECK-NEXT: cmpne [[COND:p[0-9]+]].d, [[PG1]]/z, [[AND]].d, #0
+; CHECK-NEXT: sel [[RES:z[0-9]+]].d, [[COND]], [[OP1]].d, [[OP2]].d
+; CHECK-NEXT: st1d { [[RES]].d }, [[PG]], [x0]
+; CHECK: ret
   %mask = load <4 x i1>, <4 x i1>* %c
   %op1 = load <4 x double>, <4 x double>* %a
   %op2 = load <4 x double>, <4 x double>* %b

diff  --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-div.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-div.ll
index ac9ce7111f2d..9af597cd925a 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-div.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-div.ll
@@ -81,31 +81,31 @@ define <16 x i8> @sdiv_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
 
 define void @sdiv_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 ; CHECK-LABEL: sdiv_v32i8:
-; VBITS_GE_512: ptrue [[PG:p[0-9]+]].b, vl[[#min(VBYTES,32)]]
-; VBITS_GE_512-NEXT: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
-; VBITS_GE_512-NEXT: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
-; VBITS_GE_256: ptrue [[PG1:p[0-9]+]].s, vl[[#min(VBYTES,8)]]
-; VBITS_GE_256-NEXT: sunpkhi [[OP1_HI:z[0-9]+]].h, [[OP1]].b
-; VBITS_GE_256-NEXT: sunpkhi [[OP2_HI:z[0-9]+]].h, [[OP2]].b
-; VBITS_GE_256-NEXT: sunpklo [[OP2_LO:z[0-9]+]].h, [[OP2]].b
-; VBITS_GE_256-NEXT: sunpklo [[OP1_LO:z[0-9]+]].h, [[OP1]].b
-; VBITS_GE_256-NEXT: sunpkhi [[OP2_HI_HI:z[0-9]]].s, [[OP2_HI]].h
-; VBITS_GE_256-NEXT: sunpkhi [[OP1_HI_HI:z[0-9]]].s, [[OP1_HI]].h
-; VBITS_GE_256-NEXT: sunpklo [[OP2_HI_LO:z[0-9]+]].s, [[OP2_HI]].h
-; VBITS_GE_256-NEXT: sunpklo [[OP1_HI_LO:z[0-9]+]].s, [[OP1_HI]].h
-; VBITS_GE_256-NEXT: sdivr   [[RES_HI_HI:z[0-9]+]].s, [[PG1]]/m, [[OP2_HI_HI]].s, [[OP1_HI_HI]].s
-; VBITS_GE_256-NEXT: sunpkhi [[OP2_LO_HI:z[0-9]+]].s, [[OP2_LO]].h
-; VBITS_GE_256-NEXT: sdivr   [[RES_HI_LO:z[0-9]+]].s, [[PG1]]/m, [[OP2_HI_LO]].s, [[OP1_HI_LO]].s
-; VBITS_GE_256-NEXT: sunpkhi [[OP1_LO_HI:z[0-9]+]].s, [[OP1_LO]].h
-; VBITS_GE_256-NEXT: sunpklo [[OP2_LO_LO:z[0-9]+]].s, [[OP2_LO]].h
-; VBITS_GE_256-NEXT: sunpklo [[OP1_LO_LO:z[0-9]+]].s, [[OP1_LO]].h
-; VBITS_GE_256-NEXT: sdiv    [[RES_LO_HI:z[0-9]+]].s, [[PG1]]/m, [[OP1_LO_HI]].s, [[OP2_LO_HI]].s
-; VBITS_GE_256-NEXT: sdiv    [[RES_LO_LO:z[0-9]+]].s, [[PG1]]/m, [[OP1_LO_LO]].s, [[OP2_LO_LO]].s
-; VBITS_GE_256-NEXT: uzp1    [[RES_HI:z[0-9]+]].h, [[RES_HI_LO]].h, [[RES_HI_HI]].h
-; VBITS_GE_256-NEXT: uzp1    [[RES_LO:z[0-9]+]].h, [[RES_LO_LO]].h, [[RES_LO_HI]].h
-; VBITS_GE_256-NEXT: uzp1    [[RES:z[0-9]+]].b, [[RES_LO]].b, [[RES_HI]].b
-; VBITS_GE_256-NEXT: st1b    { [[RES]].b }, [[PG]], [x0]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].b, vl[[#min(VBYTES,32)]]
+; CHECK-NEXT: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
+; CHECK-NEXT: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
+; CHECK-NEXT: ptrue [[PG1:p[0-9]+]].s, vl[[#min(VBYTES,8)]]
+; CHECK-NEXT: sunpkhi [[OP1_HI:z[0-9]+]].h, [[OP1]].b
+; CHECK-NEXT: sunpkhi [[OP2_HI:z[0-9]+]].h, [[OP2]].b
+; CHECK-NEXT: sunpklo [[OP2_LO:z[0-9]+]].h, [[OP2]].b
+; CHECK-NEXT: sunpklo [[OP1_LO:z[0-9]+]].h, [[OP1]].b
+; CHECK-NEXT: sunpkhi [[OP2_HI_HI:z[0-9]]].s, [[OP2_HI]].h
+; CHECK-NEXT: sunpkhi [[OP1_HI_HI:z[0-9]]].s, [[OP1_HI]].h
+; CHECK-NEXT: sunpklo [[OP2_HI_LO:z[0-9]+]].s, [[OP2_HI]].h
+; CHECK-NEXT: sunpklo [[OP1_HI_LO:z[0-9]+]].s, [[OP1_HI]].h
+; CHECK-NEXT: sdivr   [[RES_HI_HI:z[0-9]+]].s, [[PG1]]/m, [[OP2_HI_HI]].s, [[OP1_HI_HI]].s
+; CHECK-NEXT: sunpkhi [[OP2_LO_HI:z[0-9]+]].s, [[OP2_LO]].h
+; CHECK-NEXT: sdivr   [[RES_HI_LO:z[0-9]+]].s, [[PG1]]/m, [[OP2_HI_LO]].s, [[OP1_HI_LO]].s
+; CHECK-NEXT: sunpkhi [[OP1_LO_HI:z[0-9]+]].s, [[OP1_LO]].h
+; CHECK-NEXT: sunpklo [[OP2_LO_LO:z[0-9]+]].s, [[OP2_LO]].h
+; CHECK-NEXT: sunpklo [[OP1_LO_LO:z[0-9]+]].s, [[OP1_LO]].h
+; CHECK-NEXT: sdiv    [[RES_LO_HI:z[0-9]+]].s, [[PG1]]/m, [[OP1_LO_HI]].s, [[OP2_LO_HI]].s
+; CHECK-NEXT: sdiv    [[RES_LO_LO:z[0-9]+]].s, [[PG1]]/m, [[OP1_LO_LO]].s, [[OP2_LO_LO]].s
+; CHECK-NEXT: uzp1    [[RES_HI:z[0-9]+]].h, [[RES_HI_LO]].h, [[RES_HI_HI]].h
+; CHECK-NEXT: uzp1    [[RES_LO:z[0-9]+]].h, [[RES_LO_LO]].h, [[RES_LO_HI]].h
+; CHECK-NEXT: uzp1    [[RES:z[0-9]+]].b, [[RES_LO]].b, [[RES_HI]].b
+; CHECK-NEXT: st1b    { [[RES]].b }, [[PG]], [x0]
+; CHECK-NEXT: ret
   %op1 = load <32 x i8>, <32 x i8>* %a
   %op2 = load <32 x i8>, <32 x i8>* %b
   %res = sdiv <32 x i8> %op1, %op2
@@ -248,19 +248,19 @@ define <8 x i16> @sdiv_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
 
 define void @sdiv_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
 ; CHECK-LABEL: sdiv_v16i16:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),16)]]
-; VBITS_GE_256-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_GE_256-NEXT: ptrue [[PG1:p[0-9]+]].s, vl[[#min(div(VBYTES,2),8)]]
-; VBITS_GE_256-NEXT: sunpkhi [[OP1_HI:z[0-9]+]].s, [[OP1]].h
-; VBITS_GE_256-NEXT: sunpkhi [[OP2_HI:z[0-9]+]].s, [[OP2]].h
-; VBITS_GE_256-NEXT: sunpklo [[OP2_LO:z[0-9]+]].s, [[OP2]].h
-; VBITS_GE_256-NEXT: sunpklo [[OP1_LO:z[0-9]+]].s, [[OP1]].h
-; VBITS_GE_256-NEXT: sdivr   [[RES_HI:z[0-9]+]].s, [[PG1]]/m, [[OP2_HI]].s, [[OP1_HI]].s
-; VBITS_GE_256-NEXT: sdiv    [[RES_LO:z[0-9]+]].s, [[PG1]]/m, [[OP1_LO]].s, [[OP2_LO]].s
-; VBITS_GE_256-NEXT: uzp1 [[RES:z[0-9]+]].h, [[RES_LO]].h, [[RES_HI]].h
-; VBITS_GE_256-NEXT: st1h { [[RES]].h }, [[PG]], [x0]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),16)]]
+; CHECK-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
+; CHECK-NEXT: ptrue [[PG1:p[0-9]+]].s, vl[[#min(div(VBYTES,2),8)]]
+; CHECK-NEXT: sunpkhi [[OP1_HI:z[0-9]+]].s, [[OP1]].h
+; CHECK-NEXT: sunpkhi [[OP2_HI:z[0-9]+]].s, [[OP2]].h
+; CHECK-NEXT: sunpklo [[OP2_LO:z[0-9]+]].s, [[OP2]].h
+; CHECK-NEXT: sunpklo [[OP1_LO:z[0-9]+]].s, [[OP1]].h
+; CHECK-NEXT: sdivr   [[RES_HI:z[0-9]+]].s, [[PG1]]/m, [[OP2_HI]].s, [[OP1_HI]].s
+; CHECK-NEXT: sdiv    [[RES_LO:z[0-9]+]].s, [[PG1]]/m, [[OP1_LO]].s, [[OP2_LO]].s
+; CHECK-NEXT: uzp1 [[RES:z[0-9]+]].h, [[RES_LO]].h, [[RES_HI]].h
+; CHECK-NEXT: st1h { [[RES]].h }, [[PG]], [x0]
+; CHECK-NEXT: ret
   %op1 = load <16 x i16>, <16 x i16>* %a
   %op2 = load <16 x i16>, <16 x i16>* %b
   %res = sdiv <16 x i16> %op1, %op2
@@ -356,12 +356,12 @@ define <4 x i32> @sdiv_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
 
 define void @sdiv_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
 ; CHECK-LABEL: sdiv_v8i32:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),8)]]
-; VBITS_GE_256-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_GE_256-NEXT: sdiv [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
-; VBITS_GE_256-NEXT: st1w { [[RES]].s }, [[PG]], [x0]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),8)]]
+; CHECK-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
+; CHECK-NEXT: sdiv [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
+; CHECK-NEXT: st1w { [[RES]].s }, [[PG]], [x0]
+; CHECK-NEXT: ret
   %op1 = load <8 x i32>, <8 x i32>* %a
   %op2 = load <8 x i32>, <8 x i32>* %b
   %res = sdiv <8 x i32> %op1, %op2
@@ -436,12 +436,12 @@ define <2 x i64> @sdiv_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
 
 define void @sdiv_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
 ; CHECK-LABEL: sdiv_v4i64:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),4)]]
-; VBITS_GE_256-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_256-NEXT: sdiv [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
-; VBITS_GE_256-NEXT: st1d { [[RES]].d }, [[PG]], [x0]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),4)]]
+; CHECK-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
+; CHECK-NEXT: sdiv [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
+; CHECK-NEXT: st1d { [[RES]].d }, [[PG]], [x0]
+; CHECK-NEXT: ret
   %op1 = load <4 x i64>, <4 x i64>* %a
   %op2 = load <4 x i64>, <4 x i64>* %b
   %res = sdiv <4 x i64> %op1, %op2
@@ -555,31 +555,31 @@ define <16 x i8> @udiv_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
 
 define void @udiv_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 ; CHECK-LABEL: udiv_v32i8:
-; VBITS_GE_512: ptrue [[PG:p[0-9]+]].b, vl[[#min(VBYTES,32)]]
-; VBITS_GE_512-NEXT: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
-; VBITS_GE_512-NEXT: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
-; VBITS_GE_256: ptrue [[PG1:p[0-9]+]].s, vl[[#min(VBYTES,8)]]
-; VBITS_GE_256-NEXT: uunpkhi [[OP1_HI:z[0-9]+]].h, [[OP1]].b
-; VBITS_GE_256-NEXT: uunpkhi [[OP2_HI:z[0-9]+]].h, [[OP2]].b
-; VBITS_GE_256-NEXT: uunpklo [[OP2_LO:z[0-9]+]].h, [[OP2]].b
-; VBITS_GE_256-NEXT: uunpklo [[OP1_LO:z[0-9]+]].h, [[OP1]].b
-; VBITS_GE_256-NEXT: uunpkhi [[OP2_HI_HI:z[0-9]]].s, [[OP2_HI]].h
-; VBITS_GE_256-NEXT: uunpkhi [[OP1_HI_HI:z[0-9]]].s, [[OP1_HI]].h
-; VBITS_GE_256-NEXT: uunpklo [[OP2_HI_LO:z[0-9]+]].s, [[OP2_HI]].h
-; VBITS_GE_256-NEXT: uunpklo [[OP1_HI_LO:z[0-9]+]].s, [[OP1_HI]].h
-; VBITS_GE_256-NEXT: udivr   [[RES_HI_HI:z[0-9]+]].s, [[PG1]]/m, [[OP2_HI_HI]].s, [[OP1_HI_HI]].s
-; VBITS_GE_256-NEXT: uunpkhi [[OP2_LO_HI:z[0-9]+]].s, [[OP2_LO]].h
-; VBITS_GE_256-NEXT: udivr   [[RES_HI_LO:z[0-9]+]].s, [[PG1]]/m, [[OP2_HI_LO]].s, [[OP1_HI_LO]].s
-; VBITS_GE_256-NEXT: uunpkhi [[OP1_LO_HI:z[0-9]+]].s, [[OP1_LO]].h
-; VBITS_GE_256-NEXT: uunpklo [[OP2_LO_LO:z[0-9]+]].s, [[OP2_LO]].h
-; VBITS_GE_256-NEXT: uunpklo [[OP1_LO_LO:z[0-9]+]].s, [[OP1_LO]].h
-; VBITS_GE_256-NEXT: udiv    [[RES_LO_HI:z[0-9]+]].s, [[PG1]]/m, [[OP1_LO_HI]].s, [[OP2_LO_HI]].s
-; VBITS_GE_256-NEXT: udiv    [[RES_LO_LO:z[0-9]+]].s, [[PG1]]/m, [[OP1_LO_LO]].s, [[OP2_LO_LO]].s
-; VBITS_GE_256-NEXT: uzp1    [[RES_HI:z[0-9]+]].h, [[RES_HI_LO]].h, [[RES_HI_HI]].h
-; VBITS_GE_256-NEXT: uzp1    [[RES_LO:z[0-9]+]].h, [[RES_LO_LO]].h, [[RES_LO_HI]].h
-; VBITS_GE_256-NEXT: uzp1    [[RES:z[0-9]+]].b, [[RES_LO]].b, [[RES_HI]].b
-; VBITS_GE_256-NEXT: st1b    { [[RES]].b }, [[PG]], [x0]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].b, vl[[#min(VBYTES,32)]]
+; CHECK-NEXT: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
+; CHECK-NEXT: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
+; CHECK-NEXT: ptrue [[PG1:p[0-9]+]].s, vl[[#min(VBYTES,8)]]
+; CHECK-NEXT: uunpkhi [[OP1_HI:z[0-9]+]].h, [[OP1]].b
+; CHECK-NEXT: uunpkhi [[OP2_HI:z[0-9]+]].h, [[OP2]].b
+; CHECK-NEXT: uunpklo [[OP2_LO:z[0-9]+]].h, [[OP2]].b
+; CHECK-NEXT: uunpklo [[OP1_LO:z[0-9]+]].h, [[OP1]].b
+; CHECK-NEXT: uunpkhi [[OP2_HI_HI:z[0-9]]].s, [[OP2_HI]].h
+; CHECK-NEXT: uunpkhi [[OP1_HI_HI:z[0-9]]].s, [[OP1_HI]].h
+; CHECK-NEXT: uunpklo [[OP2_HI_LO:z[0-9]+]].s, [[OP2_HI]].h
+; CHECK-NEXT: uunpklo [[OP1_HI_LO:z[0-9]+]].s, [[OP1_HI]].h
+; CHECK-NEXT: udivr   [[RES_HI_HI:z[0-9]+]].s, [[PG1]]/m, [[OP2_HI_HI]].s, [[OP1_HI_HI]].s
+; CHECK-NEXT: uunpkhi [[OP2_LO_HI:z[0-9]+]].s, [[OP2_LO]].h
+; CHECK-NEXT: udivr   [[RES_HI_LO:z[0-9]+]].s, [[PG1]]/m, [[OP2_HI_LO]].s, [[OP1_HI_LO]].s
+; CHECK-NEXT: uunpkhi [[OP1_LO_HI:z[0-9]+]].s, [[OP1_LO]].h
+; CHECK-NEXT: uunpklo [[OP2_LO_LO:z[0-9]+]].s, [[OP2_LO]].h
+; CHECK-NEXT: uunpklo [[OP1_LO_LO:z[0-9]+]].s, [[OP1_LO]].h
+; CHECK-NEXT: udiv    [[RES_LO_HI:z[0-9]+]].s, [[PG1]]/m, [[OP1_LO_HI]].s, [[OP2_LO_HI]].s
+; CHECK-NEXT: udiv    [[RES_LO_LO:z[0-9]+]].s, [[PG1]]/m, [[OP1_LO_LO]].s, [[OP2_LO_LO]].s
+; CHECK-NEXT: uzp1    [[RES_HI:z[0-9]+]].h, [[RES_HI_LO]].h, [[RES_HI_HI]].h
+; CHECK-NEXT: uzp1    [[RES_LO:z[0-9]+]].h, [[RES_LO_LO]].h, [[RES_LO_HI]].h
+; CHECK-NEXT: uzp1    [[RES:z[0-9]+]].b, [[RES_LO]].b, [[RES_HI]].b
+; CHECK-NEXT: st1b    { [[RES]].b }, [[PG]], [x0]
+; CHECK-NEXT: ret
   %op1 = load <32 x i8>, <32 x i8>* %a
   %op2 = load <32 x i8>, <32 x i8>* %b
   %res = udiv <32 x i8> %op1, %op2
@@ -722,19 +722,19 @@ define <8 x i16> @udiv_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
 
 define void @udiv_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
 ; CHECK-LABEL: udiv_v16i16:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),16)]]
-; VBITS_GE_256-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_GE_256-NEXT: ptrue [[PG1:p[0-9]+]].s, vl[[#min(div(VBYTES,2),8)]]
-; VBITS_GE_256-NEXT: uunpkhi [[OP1_HI:z[0-9]+]].s, [[OP1]].h
-; VBITS_GE_256-NEXT: uunpkhi [[OP2_HI:z[0-9]+]].s, [[OP2]].h
-; VBITS_GE_256-NEXT: uunpklo [[OP2_LO:z[0-9]+]].s, [[OP2]].h
-; VBITS_GE_256-NEXT: uunpklo [[OP1_LO:z[0-9]+]].s, [[OP1]].h
-; VBITS_GE_256-NEXT: udivr   [[RES_HI:z[0-9]+]].s, [[PG1]]/m, [[OP2_HI]].s, [[OP1_HI]].s
-; VBITS_GE_256-NEXT: udiv    [[RES_LO:z[0-9]+]].s, [[PG1]]/m, [[OP1_LO]].s, [[OP2_LO]].s
-; VBITS_GE_256-NEXT: uzp1 [[RES:z[0-9]+]].h, [[RES_LO]].h, [[RES_HI]].h
-; VBITS_GE_256-NEXT: st1h { [[RES]].h }, [[PG]], [x0]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),16)]]
+; CHECK-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
+; CHECK-NEXT: ptrue [[PG1:p[0-9]+]].s, vl[[#min(div(VBYTES,2),8)]]
+; CHECK-NEXT: uunpkhi [[OP1_HI:z[0-9]+]].s, [[OP1]].h
+; CHECK-NEXT: uunpkhi [[OP2_HI:z[0-9]+]].s, [[OP2]].h
+; CHECK-NEXT: uunpklo [[OP2_LO:z[0-9]+]].s, [[OP2]].h
+; CHECK-NEXT: uunpklo [[OP1_LO:z[0-9]+]].s, [[OP1]].h
+; CHECK-NEXT: udivr   [[RES_HI:z[0-9]+]].s, [[PG1]]/m, [[OP2_HI]].s, [[OP1_HI]].s
+; CHECK-NEXT: udiv    [[RES_LO:z[0-9]+]].s, [[PG1]]/m, [[OP1_LO]].s, [[OP2_LO]].s
+; CHECK-NEXT: uzp1 [[RES:z[0-9]+]].h, [[RES_LO]].h, [[RES_HI]].h
+; CHECK-NEXT: st1h { [[RES]].h }, [[PG]], [x0]
+; CHECK-NEXT: ret
   %op1 = load <16 x i16>, <16 x i16>* %a
   %op2 = load <16 x i16>, <16 x i16>* %b
   %res = udiv <16 x i16> %op1, %op2
@@ -830,12 +830,12 @@ define <4 x i32> @udiv_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
 
 define void @udiv_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
 ; CHECK-LABEL: udiv_v8i32:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),8)]]
-; VBITS_GE_256-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_GE_256-NEXT: udiv [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
-; VBITS_GE_256-NEXT: st1w { [[RES]].s }, [[PG]], [x0]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),8)]]
+; CHECK-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
+; CHECK-NEXT: udiv [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
+; CHECK-NEXT: st1w { [[RES]].s }, [[PG]], [x0]
+; CHECK-NEXT: ret
   %op1 = load <8 x i32>, <8 x i32>* %a
   %op2 = load <8 x i32>, <8 x i32>* %b
   %res = udiv <8 x i32> %op1, %op2
@@ -910,12 +910,12 @@ define <2 x i64> @udiv_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
 
 define void @udiv_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
 ; CHECK-LABEL: udiv_v4i64:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),4)]]
-; VBITS_GE_256-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_256-NEXT: udiv [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
-; VBITS_GE_256-NEXT: st1d { [[RES]].d }, [[PG]], [x0]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),4)]]
+; CHECK-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
+; CHECK-NEXT: udiv [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
+; CHECK-NEXT: st1d { [[RES]].d }, [[PG]], [x0]
+; CHECK-NEXT: ret
   %op1 = load <4 x i64>, <4 x i64>* %a
   %op2 = load <4 x i64>, <4 x i64>* %b
   %res = udiv <4 x i64> %op1, %op2

diff  --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-reduce.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-reduce.ll
index 4967f53d1dfe..9e7e190b65a8 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-reduce.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-reduce.ll
@@ -44,11 +44,11 @@ define i8 @uaddv_v16i8(<16 x i8> %a) #0 {
 
 define i8 @uaddv_v32i8(<32 x i8>* %a) #0 {
 ; CHECK-LABEL: uaddv_v32i8:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].b, vl32
-; VBITS_GE_256-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].b
-; VBITS_GE_256-NEXT: fmov x0, [[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].b, vl32
+; CHECK-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; CHECK-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].b
+; CHECK-NEXT: fmov x0, [[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <32 x i8>, <32 x i8>* %a
   %res = call i8 @llvm.vector.reduce.add.v32i8(<32 x i8> %op)
   ret i8 %res
@@ -120,11 +120,11 @@ define i16 @uaddv_v8i16(<8 x i16> %a) #0 {
 
 define i16 @uaddv_v16i16(<16 x i16>* %a) #0 {
 ; CHECK-LABEL: uaddv_v16i16:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].h, vl16
-; VBITS_GE_256-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].h
-; VBITS_GE_256-NEXT: fmov x0, [[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
+; CHECK-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].h
+; CHECK-NEXT: fmov x0, [[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <16 x i16>, <16 x i16>* %a
   %res = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %op)
   ret i16 %res
@@ -196,11 +196,11 @@ define i32 @uaddv_v4i32(<4 x i32> %a) #0 {
 
 define i32 @uaddv_v8i32(<8 x i32>* %a) #0 {
 ; CHECK-LABEL: uaddv_v8i32:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].s, vl8
-; VBITS_GE_256-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].s
-; VBITS_GE_256-NEXT: fmov x0, [[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl8
+; CHECK-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].s
+; CHECK-NEXT: fmov x0, [[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <8 x i32>, <8 x i32>* %a
   %res = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %op)
   ret i32 %res
@@ -272,11 +272,11 @@ define i64 @uaddv_v2i64(<2 x i64> %a) #0 {
 
 define i64 @uaddv_v4i64(<4 x i64>* %a) #0 {
 ; CHECK-LABEL: uaddv_v4i64:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].d, vl4
-; VBITS_GE_256-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
-; VBITS_GE_256-NEXT: fmov x0, [[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl4
+; CHECK-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
+; CHECK-NEXT: fmov x0, [[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <4 x i64>, <4 x i64>* %a
   %res = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %op)
   ret i64 %res
@@ -352,11 +352,11 @@ define i8 @smaxv_v16i8(<16 x i8> %a) #0 {
 
 define i8 @smaxv_v32i8(<32 x i8>* %a) #0 {
 ; CHECK-LABEL: smaxv_v32i8:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].b, vl32
-; VBITS_GE_256-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: smaxv b[[REDUCE:[0-9]+]], [[PG]], [[OP]].b
-; VBITS_GE_256-NEXT: fmov w0, s[[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].b, vl32
+; CHECK-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; CHECK-NEXT: smaxv b[[REDUCE:[0-9]+]], [[PG]], [[OP]].b
+; CHECK-NEXT: fmov w0, s[[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <32 x i8>, <32 x i8>* %a
   %res = call i8 @llvm.vector.reduce.smax.v32i8(<32 x i8> %op)
   ret i8 %res
@@ -428,11 +428,11 @@ define i16 @smaxv_v8i16(<8 x i16> %a) #0 {
 
 define i16 @smaxv_v16i16(<16 x i16>* %a) #0 {
 ; CHECK-LABEL: smaxv_v16i16:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].h, vl16
-; VBITS_GE_256-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: smaxv h[[REDUCE:[0-9]+]], [[PG]], [[OP]].h
-; VBITS_GE_256-NEXT: fmov w0, s[[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
+; CHECK-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK-NEXT: smaxv h[[REDUCE:[0-9]+]], [[PG]], [[OP]].h
+; CHECK-NEXT: fmov w0, s[[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <16 x i16>, <16 x i16>* %a
   %res = call i16 @llvm.vector.reduce.smax.v16i16(<16 x i16> %op)
   ret i16 %res
@@ -504,11 +504,11 @@ define i32 @smaxv_v4i32(<4 x i32> %a) #0 {
 
 define i32 @smaxv_v8i32(<8 x i32>* %a) #0 {
 ; CHECK-LABEL: smaxv_v8i32:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].s, vl8
-; VBITS_GE_256-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: smaxv [[REDUCE:s[0-9]+]], [[PG]], [[OP]].s
-; VBITS_GE_256-NEXT: fmov w0, [[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl8
+; CHECK-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK-NEXT: smaxv [[REDUCE:s[0-9]+]], [[PG]], [[OP]].s
+; CHECK-NEXT: fmov w0, [[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <8 x i32>, <8 x i32>* %a
   %res = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> %op)
   ret i32 %res
@@ -582,11 +582,11 @@ define i64 @smaxv_v2i64(<2 x i64> %a) #0 {
 
 define i64 @smaxv_v4i64(<4 x i64>* %a) #0 {
 ; CHECK-LABEL: smaxv_v4i64:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].d, vl4
-; VBITS_GE_256-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: smaxv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
-; VBITS_GE_256-NEXT: fmov x0, [[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl4
+; CHECK-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK-NEXT: smaxv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
+; CHECK-NEXT: fmov x0, [[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <4 x i64>, <4 x i64>* %a
   %res = call i64 @llvm.vector.reduce.smax.v4i64(<4 x i64> %op)
   ret i64 %res
@@ -662,11 +662,11 @@ define i8 @sminv_v16i8(<16 x i8> %a) #0 {
 
 define i8 @sminv_v32i8(<32 x i8>* %a) #0 {
 ; CHECK-LABEL: sminv_v32i8:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].b, vl32
-; VBITS_GE_256-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: sminv b[[REDUCE:[0-9]+]], [[PG]], [[OP]].b
-; VBITS_GE_256-NEXT: fmov w0, s[[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].b, vl32
+; CHECK-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; CHECK-NEXT: sminv b[[REDUCE:[0-9]+]], [[PG]], [[OP]].b
+; CHECK-NEXT: fmov w0, s[[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <32 x i8>, <32 x i8>* %a
   %res = call i8 @llvm.vector.reduce.smin.v32i8(<32 x i8> %op)
   ret i8 %res
@@ -738,11 +738,11 @@ define i16 @sminv_v8i16(<8 x i16> %a) #0 {
 
 define i16 @sminv_v16i16(<16 x i16>* %a) #0 {
 ; CHECK-LABEL: sminv_v16i16:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].h, vl16
-; VBITS_GE_256-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: sminv h[[REDUCE:[0-9]+]], [[PG]], [[OP]].h
-; VBITS_GE_256-NEXT: fmov w0, s[[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
+; CHECK-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK-NEXT: sminv h[[REDUCE:[0-9]+]], [[PG]], [[OP]].h
+; CHECK-NEXT: fmov w0, s[[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <16 x i16>, <16 x i16>* %a
   %res = call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %op)
   ret i16 %res
@@ -814,11 +814,11 @@ define i32 @sminv_v4i32(<4 x i32> %a) #0 {
 
 define i32 @sminv_v8i32(<8 x i32>* %a) #0 {
 ; CHECK-LABEL: sminv_v8i32:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].s, vl8
-; VBITS_GE_256-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: sminv [[REDUCE:s[0-9]+]], [[PG]], [[OP]].s
-; VBITS_GE_256-NEXT: fmov w0, [[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl8
+; CHECK-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK-NEXT: sminv [[REDUCE:s[0-9]+]], [[PG]], [[OP]].s
+; CHECK-NEXT: fmov w0, [[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <8 x i32>, <8 x i32>* %a
   %res = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> %op)
   ret i32 %res
@@ -892,11 +892,11 @@ define i64 @sminv_v2i64(<2 x i64> %a) #0 {
 
 define i64 @sminv_v4i64(<4 x i64>* %a) #0 {
 ; CHECK-LABEL: sminv_v4i64:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].d, vl4
-; VBITS_GE_256-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: sminv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
-; VBITS_GE_256-NEXT: fmov x0, [[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl4
+; CHECK-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK-NEXT: sminv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
+; CHECK-NEXT: fmov x0, [[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <4 x i64>, <4 x i64>* %a
   %res = call i64 @llvm.vector.reduce.smin.v4i64(<4 x i64> %op)
   ret i64 %res
@@ -972,11 +972,11 @@ define i8 @umaxv_v16i8(<16 x i8> %a) #0 {
 
 define i8 @umaxv_v32i8(<32 x i8>* %a) #0 {
 ; CHECK-LABEL: umaxv_v32i8:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].b, vl32
-; VBITS_GE_256-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: umaxv b[[REDUCE:[0-9]+]], [[PG]], [[OP]].b
-; VBITS_GE_256-NEXT: fmov w0, s[[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].b, vl32
+; CHECK-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; CHECK-NEXT: umaxv b[[REDUCE:[0-9]+]], [[PG]], [[OP]].b
+; CHECK-NEXT: fmov w0, s[[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <32 x i8>, <32 x i8>* %a
   %res = call i8 @llvm.vector.reduce.umax.v32i8(<32 x i8> %op)
   ret i8 %res
@@ -1048,11 +1048,11 @@ define i16 @umaxv_v8i16(<8 x i16> %a) #0 {
 
 define i16 @umaxv_v16i16(<16 x i16>* %a) #0 {
 ; CHECK-LABEL: umaxv_v16i16:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].h, vl16
-; VBITS_GE_256-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: umaxv h[[REDUCE:[0-9]+]], [[PG]], [[OP]].h
-; VBITS_GE_256-NEXT: fmov w0, s[[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
+; CHECK-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK-NEXT: umaxv h[[REDUCE:[0-9]+]], [[PG]], [[OP]].h
+; CHECK-NEXT: fmov w0, s[[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <16 x i16>, <16 x i16>* %a
   %res = call i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> %op)
   ret i16 %res
@@ -1124,11 +1124,11 @@ define i32 @umaxv_v4i32(<4 x i32> %a) #0 {
 
 define i32 @umaxv_v8i32(<8 x i32>* %a) #0 {
 ; CHECK-LABEL: umaxv_v8i32:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].s, vl8
-; VBITS_GE_256-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: umaxv [[REDUCE:s[0-9]+]], [[PG]], [[OP]].s
-; VBITS_GE_256-NEXT: fmov w0, [[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl8
+; CHECK-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK-NEXT: umaxv [[REDUCE:s[0-9]+]], [[PG]], [[OP]].s
+; CHECK-NEXT: fmov w0, [[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <8 x i32>, <8 x i32>* %a
   %res = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> %op)
   ret i32 %res
@@ -1202,11 +1202,11 @@ define i64 @umaxv_v2i64(<2 x i64> %a) #0 {
 
 define i64 @umaxv_v4i64(<4 x i64>* %a) #0 {
 ; CHECK-LABEL: umaxv_v4i64:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].d, vl4
-; VBITS_GE_256-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: umaxv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
-; VBITS_GE_256-NEXT: fmov x0, [[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl4
+; CHECK-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK-NEXT: umaxv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
+; CHECK-NEXT: fmov x0, [[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <4 x i64>, <4 x i64>* %a
   %res = call i64 @llvm.vector.reduce.umax.v4i64(<4 x i64> %op)
   ret i64 %res
@@ -1282,11 +1282,11 @@ define i8 @uminv_v16i8(<16 x i8> %a) #0 {
 
 define i8 @uminv_v32i8(<32 x i8>* %a) #0 {
 ; CHECK-LABEL: uminv_v32i8:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].b, vl32
-; VBITS_GE_256-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: uminv b[[REDUCE:[0-9]+]], [[PG]], [[OP]].b
-; VBITS_GE_256-NEXT: fmov w0, s[[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].b, vl32
+; CHECK-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; CHECK-NEXT: uminv b[[REDUCE:[0-9]+]], [[PG]], [[OP]].b
+; CHECK-NEXT: fmov w0, s[[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <32 x i8>, <32 x i8>* %a
   %res = call i8 @llvm.vector.reduce.umin.v32i8(<32 x i8> %op)
   ret i8 %res
@@ -1358,11 +1358,11 @@ define i16 @uminv_v8i16(<8 x i16> %a) #0 {
 
 define i16 @uminv_v16i16(<16 x i16>* %a) #0 {
 ; CHECK-LABEL: uminv_v16i16:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].h, vl16
-; VBITS_GE_256-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: uminv h[[REDUCE:[0-9]+]], [[PG]], [[OP]].h
-; VBITS_GE_256-NEXT: fmov w0, s[[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
+; CHECK-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK-NEXT: uminv h[[REDUCE:[0-9]+]], [[PG]], [[OP]].h
+; CHECK-NEXT: fmov w0, s[[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <16 x i16>, <16 x i16>* %a
   %res = call i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> %op)
   ret i16 %res
@@ -1434,11 +1434,11 @@ define i32 @uminv_v4i32(<4 x i32> %a) #0 {
 
 define i32 @uminv_v8i32(<8 x i32>* %a) #0 {
 ; CHECK-LABEL: uminv_v8i32:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].s, vl8
-; VBITS_GE_256-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: uminv [[REDUCE:s[0-9]+]], [[PG]], [[OP]].s
-; VBITS_GE_256-NEXT: fmov w0, [[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl8
+; CHECK-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK-NEXT: uminv [[REDUCE:s[0-9]+]], [[PG]], [[OP]].s
+; CHECK-NEXT: fmov w0, [[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <8 x i32>, <8 x i32>* %a
   %res = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> %op)
   ret i32 %res
@@ -1512,11 +1512,11 @@ define i64 @uminv_v2i64(<2 x i64> %a) #0 {
 
 define i64 @uminv_v4i64(<4 x i64>* %a) #0 {
 ; CHECK-LABEL: uminv_v4i64:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].d, vl4
-; VBITS_GE_256-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: uminv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
-; VBITS_GE_256-NEXT: fmov x0, [[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl4
+; CHECK-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK-NEXT: uminv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
+; CHECK-NEXT: fmov x0, [[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <4 x i64>, <4 x i64>* %a
   %res = call i64 @llvm.vector.reduce.umin.v4i64(<4 x i64> %op)
   ret i64 %res

diff  --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-select.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-select.ll
index 904e56fb8c09..1d9fb7e04cda 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-select.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-select.ll
@@ -42,14 +42,14 @@ define void @select_v32i8(<32 x i8>* %a, <32 x i8>* %b, <32 x i1>* %c) #0 {
 ; CHECK: select_v32i8:
 ; CHECK: ptrue [[PG:p[0-9]+]].b, vl[[#min(VBYTES,32)]]
 ; CHECK: ptrue [[PG1:p[0-9]+]].b
-; VBITS_GE_256: ld1b { [[MASK:z[0-9]+]].b }, [[PG]]/z, [x9]
-; VBITS_GE_256-NEXT: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
-; VBITS_GE_256-NEXT: and [[AND:z[0-9]+]].b, [[MASK]].b, #0x1
-; VBITS_GE_256-NEXT: cmpne [[COND:p[0-9]+]].b, [[PG1]]/z, [[AND]].b, #0
-; VBITS_GE_256-NEXT: sel [[RES:z[0-9]+]].b, [[COND]], [[OP1]].b, [[OP2]].b
-; VBITS_GE_256-NEXT: st1b { [[RES]].b }, [[PG]], [x0]
-; VBITS_GE_256: ret
+; CHECK: ld1b { [[MASK:z[0-9]+]].b }, [[PG]]/z, [x9]
+; CHECK-NEXT: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
+; CHECK-NEXT: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
+; CHECK-NEXT: and [[AND:z[0-9]+]].b, [[MASK]].b, #0x1
+; CHECK-NEXT: cmpne [[COND:p[0-9]+]].b, [[PG1]]/z, [[AND]].b, #0
+; CHECK-NEXT: sel [[RES:z[0-9]+]].b, [[COND]], [[OP1]].b, [[OP2]].b
+; CHECK-NEXT: st1b { [[RES]].b }, [[PG]], [x0]
+; CHECK: ret
   %mask = load <32 x i1>, <32 x i1>* %c
   %op1 = load <32 x i8>, <32 x i8>* %a
   %op2 = load <32 x i8>, <32 x i8>* %b
@@ -140,14 +140,14 @@ define void @select_v16i16(<16 x i16>* %a, <16 x i16>* %b, <16 x i1>* %c) #0 {
 ; CHECK: select_v16i16:
 ; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),16)]]
 ; CHECK: ptrue [[PG1:p[0-9]+]].h
-; VBITS_GE_256: ld1h { [[MASK:z[0-9]+]].h }, [[PG]]/z, [x9]
-; VBITS_GE_256-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_GE_256-NEXT: and [[AND:z[0-9]+]].h, [[MASK]].h, #0x1
-; VBITS_GE_256-NEXT: cmpne [[COND:p[0-9]+]].h, [[PG1]]/z, [[AND]].h, #0
-; VBITS_GE_256-NEXT: sel [[RES:z[0-9]+]].h, [[COND]], [[OP1]].h, [[OP2]].h
-; VBITS_GE_256-NEXT: st1h { [[RES]].h }, [[PG]], [x0]
-; VBITS_GE_256: ret
+; CHECK: ld1h { [[MASK:z[0-9]+]].h }, [[PG]]/z, [x9]
+; CHECK-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
+; CHECK-NEXT: and [[AND:z[0-9]+]].h, [[MASK]].h, #0x1
+; CHECK-NEXT: cmpne [[COND:p[0-9]+]].h, [[PG1]]/z, [[AND]].h, #0
+; CHECK-NEXT: sel [[RES:z[0-9]+]].h, [[COND]], [[OP1]].h, [[OP2]].h
+; CHECK-NEXT: st1h { [[RES]].h }, [[PG]], [x0]
+; CHECK: ret
   %mask = load <16 x i1>, <16 x i1>* %c
   %op1 = load <16 x i16>, <16 x i16>* %a
   %op2 = load <16 x i16>, <16 x i16>* %b
@@ -238,14 +238,14 @@ define void @select_v8i32(<8 x i32>* %a, <8 x i32>* %b, <8 x i1>* %c) #0 {
 ; CHECK: select_v8i32:
 ; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),8)]]
 ; CHECK: ptrue [[PG1:p[0-9]+]].s
-; VBITS_GE_256: ld1w { [[MASK:z[0-9]+]].s }, [[PG]]/z, [x9]
-; VBITS_GE_256-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_GE_256-NEXT: and [[AND:z[0-9]+]].s, [[MASK]].s, #0x1
-; VBITS_GE_256-NEXT: cmpne [[COND:p[0-9]+]].s, [[PG1]]/z, [[AND]].s, #0
-; VBITS_GE_256-NEXT: sel [[RES:z[0-9]+]].s, [[COND]], [[OP1]].s, [[OP2]].s
-; VBITS_GE_256-NEXT: st1w { [[RES]].s }, [[PG]], [x0]
-; VBITS_GE_256: ret
+; CHECK: ld1w { [[MASK:z[0-9]+]].s }, [[PG]]/z, [x9]
+; CHECK-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
+; CHECK-NEXT: and [[AND:z[0-9]+]].s, [[MASK]].s, #0x1
+; CHECK-NEXT: cmpne [[COND:p[0-9]+]].s, [[PG1]]/z, [[AND]].s, #0
+; CHECK-NEXT: sel [[RES:z[0-9]+]].s, [[COND]], [[OP1]].s, [[OP2]].s
+; CHECK-NEXT: st1w { [[RES]].s }, [[PG]], [x0]
+; CHECK: ret
   %mask = load <8 x i1>, <8 x i1>* %c
   %op1 = load <8 x i32>, <8 x i32>* %a
   %op2 = load <8 x i32>, <8 x i32>* %b
@@ -336,14 +336,14 @@ define void @select_v4i64(<4 x i64>* %a, <4 x i64>* %b, <4 x i1>* %c) #0 {
 ; CHECK: select_v4i64:
 ; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),4)]]
 ; CHECK: ptrue [[PG1:p[0-9]+]].d
-; VBITS_GE_256: ld1d { [[MASK:z[0-9]+]].d }, [[PG]]/z, [x9]
-; VBITS_GE_256-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_256-NEXT: and [[AND:z[0-9]+]].d, [[MASK]].d, #0x1
-; VBITS_GE_256-NEXT: cmpne [[COND:p[0-9]+]].d, [[PG1]]/z, [[AND]].d, #0
-; VBITS_GE_256-NEXT: sel [[RES:z[0-9]+]].d, [[COND]], [[OP1]].d, [[OP2]].d
-; VBITS_GE_256-NEXT: st1d { [[RES]].d }, [[PG]], [x0]
-; VBITS_GE_256: ret
+; CHECK: ld1d { [[MASK:z[0-9]+]].d }, [[PG]]/z, [x9]
+; CHECK-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
+; CHECK-NEXT: and [[AND:z[0-9]+]].d, [[MASK]].d, #0x1
+; CHECK-NEXT: cmpne [[COND:p[0-9]+]].d, [[PG1]]/z, [[AND]].d, #0
+; CHECK-NEXT: sel [[RES:z[0-9]+]].d, [[COND]], [[OP1]].d, [[OP2]].d
+; CHECK-NEXT: st1d { [[RES]].d }, [[PG]], [x0]
+; CHECK: ret
   %mask = load <4 x i1>, <4 x i1>* %c
   %op1 = load <4 x i64>, <4 x i64>* %a
   %op2 = load <4 x i64>, <4 x i64>* %b

diff  --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-log-reduce.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-log-reduce.ll
index ffe72b511e0d..934ed8c0b503 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-log-reduce.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-log-reduce.ll
@@ -48,11 +48,11 @@ define i8 @andv_v16i8(<16 x i8> %a) #0 {
 
 define i8 @andv_v32i8(<32 x i8>* %a) #0 {
 ; CHECK-LABEL: andv_v32i8:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].b, vl32
-; VBITS_GE_256-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: andv b[[REDUCE:[0-9]+]], [[PG]], [[OP]].b
-; VBITS_GE_256-NEXT: fmov w0, s[[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].b, vl32
+; CHECK-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; CHECK-NEXT: andv b[[REDUCE:[0-9]+]], [[PG]], [[OP]].b
+; CHECK-NEXT: fmov w0, s[[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <32 x i8>, <32 x i8>* %a
   %res = call i8 @llvm.experimental.vector.reduce.and.v32i8(<32 x i8> %op)
   ret i8 %res
@@ -129,11 +129,11 @@ define i16 @andv_v8i16(<8 x i16> %a) #0 {
 
 define i16 @andv_v16i16(<16 x i16>* %a) #0 {
 ; CHECK-LABEL: andv_v16i16:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].h, vl16
-; VBITS_GE_256-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: andv h[[REDUCE:[0-9]+]], [[PG]], [[OP]].h
-; VBITS_GE_256-NEXT: fmov w0, s[[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
+; CHECK-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK-NEXT: andv h[[REDUCE:[0-9]+]], [[PG]], [[OP]].h
+; CHECK-NEXT: fmov w0, s[[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <16 x i16>, <16 x i16>* %a
   %res = call i16 @llvm.experimental.vector.reduce.and.v16i16(<16 x i16> %op)
   ret i16 %res
@@ -209,11 +209,11 @@ define i32 @andv_v4i32(<4 x i32> %a) #0 {
 
 define i32 @andv_v8i32(<8 x i32>* %a) #0 {
 ; CHECK-LABEL: andv_v8i32:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].s, vl8
-; VBITS_GE_256-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: andv [[REDUCE:s[0-9]+]], [[PG]], [[OP]].s
-; VBITS_GE_256-NEXT: fmov w0, [[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl8
+; CHECK-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK-NEXT: andv [[REDUCE:s[0-9]+]], [[PG]], [[OP]].s
+; CHECK-NEXT: fmov w0, [[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <8 x i32>, <8 x i32>* %a
   %res = call i32 @llvm.experimental.vector.reduce.and.v8i32(<8 x i32> %op)
   ret i32 %res
@@ -287,11 +287,11 @@ define i64 @andv_v2i64(<2 x i64> %a) #0 {
 
 define i64 @andv_v4i64(<4 x i64>* %a) #0 {
 ; CHECK-LABEL: andv_v4i64:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].d, vl4
-; VBITS_GE_256-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: andv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
-; VBITS_GE_256-NEXT: fmov x0, [[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl4
+; CHECK-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK-NEXT: andv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
+; CHECK-NEXT: fmov x0, [[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <4 x i64>, <4 x i64>* %a
   %res = call i64 @llvm.experimental.vector.reduce.and.v4i64(<4 x i64> %op)
   ret i64 %res
@@ -371,11 +371,11 @@ define i8 @eorv_v16i8(<16 x i8> %a) #0 {
 
 define i8 @eorv_v32i8(<32 x i8>* %a) #0 {
 ; CHECK-LABEL: eorv_v32i8:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].b, vl32
-; VBITS_GE_256-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: eorv b[[REDUCE:[0-9]+]], [[PG]], [[OP]].b
-; VBITS_GE_256-NEXT: fmov w0, s[[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].b, vl32
+; CHECK-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; CHECK-NEXT: eorv b[[REDUCE:[0-9]+]], [[PG]], [[OP]].b
+; CHECK-NEXT: fmov w0, s[[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <32 x i8>, <32 x i8>* %a
   %res = call i8 @llvm.experimental.vector.reduce.xor.v32i8(<32 x i8> %op)
   ret i8 %res
@@ -452,11 +452,11 @@ define i16 @eorv_v8i16(<8 x i16> %a) #0 {
 
 define i16 @eorv_v16i16(<16 x i16>* %a) #0 {
 ; CHECK-LABEL: eorv_v16i16:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].h, vl16
-; VBITS_GE_256-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: eorv h[[REDUCE:[0-9]+]], [[PG]], [[OP]].h
-; VBITS_GE_256-NEXT: fmov w0, s[[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
+; CHECK-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK-NEXT: eorv h[[REDUCE:[0-9]+]], [[PG]], [[OP]].h
+; CHECK-NEXT: fmov w0, s[[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <16 x i16>, <16 x i16>* %a
   %res = call i16 @llvm.experimental.vector.reduce.xor.v16i16(<16 x i16> %op)
   ret i16 %res
@@ -532,11 +532,11 @@ define i32 @eorv_v4i32(<4 x i32> %a) #0 {
 
 define i32 @eorv_v8i32(<8 x i32>* %a) #0 {
 ; CHECK-LABEL: eorv_v8i32:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].s, vl8
-; VBITS_GE_256-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: eorv [[REDUCE:s[0-9]+]], [[PG]], [[OP]].s
-; VBITS_GE_256-NEXT: fmov w0, [[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl8
+; CHECK-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK-NEXT: eorv [[REDUCE:s[0-9]+]], [[PG]], [[OP]].s
+; CHECK-NEXT: fmov w0, [[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <8 x i32>, <8 x i32>* %a
   %res = call i32 @llvm.experimental.vector.reduce.xor.v8i32(<8 x i32> %op)
   ret i32 %res
@@ -610,11 +610,11 @@ define i64 @eorv_v2i64(<2 x i64> %a) #0 {
 
 define i64 @eorv_v4i64(<4 x i64>* %a) #0 {
 ; CHECK-LABEL: eorv_v4i64:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].d, vl4
-; VBITS_GE_256-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: eorv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
-; VBITS_GE_256-NEXT: fmov x0, [[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl4
+; CHECK-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK-NEXT: eorv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
+; CHECK-NEXT: fmov x0, [[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <4 x i64>, <4 x i64>* %a
   %res = call i64 @llvm.experimental.vector.reduce.xor.v4i64(<4 x i64> %op)
   ret i64 %res
@@ -694,11 +694,11 @@ define i8 @orv_v16i8(<16 x i8> %a) #0 {
 
 define i8 @orv_v32i8(<32 x i8>* %a) #0 {
 ; CHECK-LABEL: orv_v32i8:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].b, vl32
-; VBITS_GE_256-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: orv b[[REDUCE:[0-9]+]], [[PG]], [[OP]].b
-; VBITS_GE_256-NEXT: fmov w0, s[[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].b, vl32
+; CHECK-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; CHECK-NEXT: orv b[[REDUCE:[0-9]+]], [[PG]], [[OP]].b
+; CHECK-NEXT: fmov w0, s[[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <32 x i8>, <32 x i8>* %a
   %res = call i8 @llvm.experimental.vector.reduce.or.v32i8(<32 x i8> %op)
   ret i8 %res
@@ -775,11 +775,11 @@ define i16 @orv_v8i16(<8 x i16> %a) #0 {
 
 define i16 @orv_v16i16(<16 x i16>* %a) #0 {
 ; CHECK-LABEL: orv_v16i16:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].h, vl16
-; VBITS_GE_256-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: orv h[[REDUCE:[0-9]+]], [[PG]], [[OP]].h
-; VBITS_GE_256-NEXT: fmov w0, s[[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
+; CHECK-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK-NEXT: orv h[[REDUCE:[0-9]+]], [[PG]], [[OP]].h
+; CHECK-NEXT: fmov w0, s[[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <16 x i16>, <16 x i16>* %a
   %res = call i16 @llvm.experimental.vector.reduce.or.v16i16(<16 x i16> %op)
   ret i16 %res
@@ -855,11 +855,11 @@ define i32 @orv_v4i32(<4 x i32> %a) #0 {
 
 define i32 @orv_v8i32(<8 x i32>* %a) #0 {
 ; CHECK-LABEL: orv_v8i32:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].s, vl8
-; VBITS_GE_256-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: orv [[REDUCE:s[0-9]+]], [[PG]], [[OP]].s
-; VBITS_GE_256-NEXT: fmov w0, [[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl8
+; CHECK-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK-NEXT: orv [[REDUCE:s[0-9]+]], [[PG]], [[OP]].s
+; CHECK-NEXT: fmov w0, [[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <8 x i32>, <8 x i32>* %a
   %res = call i32 @llvm.experimental.vector.reduce.or.v8i32(<8 x i32> %op)
   ret i32 %res
@@ -933,11 +933,11 @@ define i64 @orv_v2i64(<2 x i64> %a) #0 {
 
 define i64 @orv_v4i64(<4 x i64>* %a) #0 {
 ; CHECK-LABEL: orv_v4i64:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].d, vl4
-; VBITS_GE_256-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: orv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
-; VBITS_GE_256-NEXT: fmov x0, [[REDUCE]]
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl4
+; CHECK-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK-NEXT: orv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
+; CHECK-NEXT: fmov x0, [[REDUCE]]
+; CHECK-NEXT: ret
   %op = load <4 x i64>, <4 x i64>* %a
   %res = call i64 @llvm.experimental.vector.reduce.or.v4i64(<4 x i64> %op)
   ret i64 %res

diff  --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-trunc.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-trunc.ll
index f62abc094606..656a821c446c 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-trunc.ll
@@ -199,11 +199,11 @@ define void @trunc_v64i32_v64i16(<64 x i32>* %in, <64 x i16>* %out) #0 {
 ; NOTE: v4i8 is not legal so result i8 elements are held within i16 containers.
 define <4 x i8> @trunc_v4i64_v4i8(<4 x i64>* %in) #0 {
 ; CHECK-LABEL: trunc_v4i64_v4i8:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].d, vl4
-; VBITS_GE_256-NEXT: ld1d { [[A_DWORDS:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_256-NEXT: uzp1 [[A_WORDS:z[0-9]+]].s, [[A_DWORDS]].s, [[A_DWORDS]].s
-; VBITS_GE_256-NEXT: uzp1 z0.h, [[A_WORDS]].h, [[A_WORDS]].h
-; VBITS_GE_256-NEXT: ret
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl4
+; CHECK-NEXT: ld1d { [[A_DWORDS:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK-NEXT: uzp1 [[A_WORDS:z[0-9]+]].s, [[A_DWORDS]].s, [[A_DWORDS]].s
+; CHECK-NEXT: uzp1 z0.h, [[A_WORDS]].h, [[A_WORDS]].h
+; CHECK-NEXT: ret
   %a = load <4 x i64>, <4 x i64>* %in
   %b = trunc <4 x i64> %a to <4 x i8>
   ret <4 x i8> %b
