[llvm] e772c0e - [RISCV] Use vmv.v.x if Hi bits are undef when lowering splat_vector_parts
Luke Lau via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 24 04:19:16 PDT 2023
Author: Luke Lau
Date: 2023-08-24T12:19:09+01:00
New Revision: e772c0ecd814f69ede559ac856230ae7651c9d0c
URL: https://github.com/llvm/llvm-project/commit/e772c0ecd814f69ede559ac856230ae7651c9d0c
DIFF: https://github.com/llvm/llvm-project/commit/e772c0ecd814f69ede559ac856230ae7651c9d0c.diff
LOG: [RISCV] Use vmv.v.x if Hi bits are undef when lowering splat_vector_parts
When lowering a splat_vector_parts, if the hi bits are undefined then we can
splat the lo bits without having to check whether the value will be
sign-extended or not, because those hi bits will be undefined anyway.
I've handled this for both fixed and scalable vectors, but there's no diff
on the scalable vror tests, since the hi bits aren't combined away to
undef by SimplifyDemandedBits for scalable vectors. I'm not sure why that is.
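For illustration (not part of the patch itself), here is a minimal IR reproducer of
the kind of splat this affects on RV32, taken from the vror_vx_v2i64 test updated
below. The i64 rotate amount %b is splatted via SPLAT_VECTOR_PARTS on RV32; because
the rotate only demands the low 6 bits of each element, the hi operand gets
simplified to undef, and the splat can then be lowered to a single vmv.v.x instead
of a stack store plus strided vlse64.v load:

  ; Splat of the i64 rotate amount; on RV32 this becomes SPLAT_VECTOR_PARTS,
  ; and with the hi word undef it now lowers to vmv.v.x.
  define <2 x i64> @vror_vx_v2i64(<2 x i64> %a, i64 %b) {
    %b.head = insertelement <2 x i64> poison, i64 %b, i32 0
    %b.splat = shufflevector <2 x i64> %b.head, <2 x i64> poison, <2 x i32> zeroinitializer
    %x = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %a, <2 x i64> %a, <2 x i64> %b.splat)
    ret <2 x i64> %x
  }
  declare <2 x i64> @llvm.fshr.v2i64(<2 x i64>, <2 x i64>, <2 x i64>)

The before/after effect on the generated RV32 code can be seen in the CHECK-RV32
blocks of the test diffs below.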
Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D158625
Added:
Modified:
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrol.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vror.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 4a855a9425ae3d..3e42dbfba4d099 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -3571,6 +3571,10 @@ static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
return DAG.getNode(ISD::BITCAST, DL, VT, InterVec);
}
}
+ // If the hi bits of the splat are undefined, then it's fine to just splat Lo
+ // even if it might be sign extended.
+ if (Hi.isUndef())
+ return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Lo, VL);
// Fall back to a stack store and stride x0 vector load.
return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Passthru, Lo,
@@ -6881,6 +6885,12 @@ SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT), Lo,
DAG.getRegister(RISCV::X0, MVT::i32));
+ // If the hi bits of the splat are undefined, then it's fine to just splat Lo
+ // even if it might be sign extended.
+ if (Hi.isUndef())
+ return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT), Lo,
+ DAG.getRegister(RISCV::X0, MVT::i32));
+
// Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT,
DAG.getUNDEF(VecVT), Lo, Hi,
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrol.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrol.ll
index 6a57cfb6ca2a92..6568648f98cf57 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrol.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrol.ll
@@ -916,27 +916,19 @@ define <1 x i64> @vrol_vv_v1i64(<1 x i64> %a, <1 x i64> %b) {
define <1 x i64> @vrol_vx_v1i64(<1 x i64> %a, i64 %b) {
; CHECK-RV32-LABEL: vrol_vx_v1i64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: addi sp, sp, -16
-; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16
-; CHECK-RV32-NEXT: sw a0, 12(sp)
-; CHECK-RV32-NEXT: sw a0, 8(sp)
-; CHECK-RV32-NEXT: addi a0, sp, 8
-; CHECK-RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-RV32-NEXT: vlse64.v v9, (a0), zero
; CHECK-RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-RV32-NEXT: vmv.v.i v10, 0
+; CHECK-RV32-NEXT: vmv.v.i v9, 0
; CHECK-RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-RV32-NEXT: vsub.vv v11, v10, v9
-; CHECK-RV32-NEXT: li a0, 63
+; CHECK-RV32-NEXT: vsub.vx v10, v9, a0
+; CHECK-RV32-NEXT: li a1, 63
; CHECK-RV32-NEXT: vsetvli zero, zero, e32, mf2, tu, ma
-; CHECK-RV32-NEXT: vmv.s.x v10, a0
+; CHECK-RV32-NEXT: vmv.s.x v9, a1
; CHECK-RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-RV32-NEXT: vand.vv v11, v11, v10
-; CHECK-RV32-NEXT: vsrl.vv v11, v8, v11
-; CHECK-RV32-NEXT: vand.vv v9, v9, v10
+; CHECK-RV32-NEXT: vand.vv v10, v10, v9
+; CHECK-RV32-NEXT: vsrl.vv v10, v8, v10
+; CHECK-RV32-NEXT: vand.vx v9, v9, a0
; CHECK-RV32-NEXT: vsll.vv v8, v8, v9
-; CHECK-RV32-NEXT: vor.vv v8, v8, v11
-; CHECK-RV32-NEXT: addi sp, sp, 16
+; CHECK-RV32-NEXT: vor.vv v8, v8, v10
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: vrol_vx_v1i64:
@@ -952,24 +944,11 @@ define <1 x i64> @vrol_vx_v1i64(<1 x i64> %a, i64 %b) {
; CHECK-RV64-NEXT: vor.vv v8, v10, v8
; CHECK-RV64-NEXT: ret
;
-; CHECK-ZVBB32-LABEL: vrol_vx_v1i64:
-; CHECK-ZVBB32: # %bb.0:
-; CHECK-ZVBB32-NEXT: addi sp, sp, -16
-; CHECK-ZVBB32-NEXT: .cfi_def_cfa_offset 16
-; CHECK-ZVBB32-NEXT: sw a0, 12(sp)
-; CHECK-ZVBB32-NEXT: sw a0, 8(sp)
-; CHECK-ZVBB32-NEXT: addi a0, sp, 8
-; CHECK-ZVBB32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-ZVBB32-NEXT: vlse64.v v9, (a0), zero
-; CHECK-ZVBB32-NEXT: vrol.vv v8, v8, v9
-; CHECK-ZVBB32-NEXT: addi sp, sp, 16
-; CHECK-ZVBB32-NEXT: ret
-;
-; CHECK-ZVBB64-LABEL: vrol_vx_v1i64:
-; CHECK-ZVBB64: # %bb.0:
-; CHECK-ZVBB64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-ZVBB64-NEXT: vrol.vx v8, v8, a0
-; CHECK-ZVBB64-NEXT: ret
+; CHECK-ZVBB-LABEL: vrol_vx_v1i64:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-ZVBB-NEXT: vrol.vx v8, v8, a0
+; CHECK-ZVBB-NEXT: ret
%b.head = insertelement <1 x i64> poison, i64 %b, i32 0
%b.splat = shufflevector <1 x i64> %b.head, <1 x i64> poison, <1 x i32> zeroinitializer
%x = call <1 x i64> @llvm.fshl.v1i64(<1 x i64> %a, <1 x i64> %a, <1 x i64> %b.splat)
@@ -1018,24 +997,18 @@ define <2 x i64> @vrol_vv_v2i64(<2 x i64> %a, <2 x i64> %b) {
define <2 x i64> @vrol_vx_v2i64(<2 x i64> %a, i64 %b) {
; CHECK-RV32-LABEL: vrol_vx_v2i64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: addi sp, sp, -16
-; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16
-; CHECK-RV32-NEXT: sw a0, 12(sp)
-; CHECK-RV32-NEXT: sw a0, 8(sp)
-; CHECK-RV32-NEXT: addi a0, sp, 8
; CHECK-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-RV32-NEXT: vlse64.v v9, (a0), zero
-; CHECK-RV32-NEXT: li a0, 63
-; CHECK-RV32-NEXT: vand.vx v10, v9, a0
-; CHECK-RV32-NEXT: vsll.vv v10, v8, v10
+; CHECK-RV32-NEXT: vmv.v.x v9, a0
+; CHECK-RV32-NEXT: li a1, 63
+; CHECK-RV32-NEXT: vand.vx v9, v9, a1
+; CHECK-RV32-NEXT: vsll.vv v9, v8, v9
; CHECK-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-RV32-NEXT: vmv.v.i v11, 0
+; CHECK-RV32-NEXT: vmv.v.i v10, 0
; CHECK-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-RV32-NEXT: vsub.vv v9, v11, v9
-; CHECK-RV32-NEXT: vand.vx v9, v9, a0
-; CHECK-RV32-NEXT: vsrl.vv v8, v8, v9
-; CHECK-RV32-NEXT: vor.vv v8, v10, v8
-; CHECK-RV32-NEXT: addi sp, sp, 16
+; CHECK-RV32-NEXT: vsub.vx v10, v10, a0
+; CHECK-RV32-NEXT: vand.vx v10, v10, a1
+; CHECK-RV32-NEXT: vsrl.vv v8, v8, v10
+; CHECK-RV32-NEXT: vor.vv v8, v9, v8
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: vrol_vx_v2i64:
@@ -1051,24 +1024,11 @@ define <2 x i64> @vrol_vx_v2i64(<2 x i64> %a, i64 %b) {
; CHECK-RV64-NEXT: vor.vv v8, v10, v8
; CHECK-RV64-NEXT: ret
;
-; CHECK-ZVBB32-LABEL: vrol_vx_v2i64:
-; CHECK-ZVBB32: # %bb.0:
-; CHECK-ZVBB32-NEXT: addi sp, sp, -16
-; CHECK-ZVBB32-NEXT: .cfi_def_cfa_offset 16
-; CHECK-ZVBB32-NEXT: sw a0, 12(sp)
-; CHECK-ZVBB32-NEXT: sw a0, 8(sp)
-; CHECK-ZVBB32-NEXT: addi a0, sp, 8
-; CHECK-ZVBB32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-ZVBB32-NEXT: vlse64.v v9, (a0), zero
-; CHECK-ZVBB32-NEXT: vrol.vv v8, v8, v9
-; CHECK-ZVBB32-NEXT: addi sp, sp, 16
-; CHECK-ZVBB32-NEXT: ret
-;
-; CHECK-ZVBB64-LABEL: vrol_vx_v2i64:
-; CHECK-ZVBB64: # %bb.0:
-; CHECK-ZVBB64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-ZVBB64-NEXT: vrol.vx v8, v8, a0
-; CHECK-ZVBB64-NEXT: ret
+; CHECK-ZVBB-LABEL: vrol_vx_v2i64:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-ZVBB-NEXT: vrol.vx v8, v8, a0
+; CHECK-ZVBB-NEXT: ret
%b.head = insertelement <2 x i64> poison, i64 %b, i32 0
%b.splat = shufflevector <2 x i64> %b.head, <2 x i64> poison, <2 x i32> zeroinitializer
%x = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %a, <2 x i64> %a, <2 x i64> %b.splat)
@@ -1117,24 +1077,18 @@ define <4 x i64> @vrol_vv_v4i64(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @vrol_vx_v4i64(<4 x i64> %a, i64 %b) {
; CHECK-RV32-LABEL: vrol_vx_v4i64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: addi sp, sp, -16
-; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16
-; CHECK-RV32-NEXT: sw a0, 12(sp)
-; CHECK-RV32-NEXT: sw a0, 8(sp)
-; CHECK-RV32-NEXT: addi a0, sp, 8
; CHECK-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-RV32-NEXT: vlse64.v v10, (a0), zero
-; CHECK-RV32-NEXT: li a0, 63
-; CHECK-RV32-NEXT: vand.vx v12, v10, a0
-; CHECK-RV32-NEXT: vsll.vv v12, v8, v12
+; CHECK-RV32-NEXT: vmv.v.x v10, a0
+; CHECK-RV32-NEXT: li a1, 63
+; CHECK-RV32-NEXT: vand.vx v10, v10, a1
+; CHECK-RV32-NEXT: vsll.vv v10, v8, v10
; CHECK-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-RV32-NEXT: vmv.v.i v14, 0
+; CHECK-RV32-NEXT: vmv.v.i v12, 0
; CHECK-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-RV32-NEXT: vsub.vv v10, v14, v10
-; CHECK-RV32-NEXT: vand.vx v10, v10, a0
-; CHECK-RV32-NEXT: vsrl.vv v8, v8, v10
-; CHECK-RV32-NEXT: vor.vv v8, v12, v8
-; CHECK-RV32-NEXT: addi sp, sp, 16
+; CHECK-RV32-NEXT: vsub.vx v12, v12, a0
+; CHECK-RV32-NEXT: vand.vx v12, v12, a1
+; CHECK-RV32-NEXT: vsrl.vv v8, v8, v12
+; CHECK-RV32-NEXT: vor.vv v8, v10, v8
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: vrol_vx_v4i64:
@@ -1150,24 +1104,11 @@ define <4 x i64> @vrol_vx_v4i64(<4 x i64> %a, i64 %b) {
; CHECK-RV64-NEXT: vor.vv v8, v12, v8
; CHECK-RV64-NEXT: ret
;
-; CHECK-ZVBB32-LABEL: vrol_vx_v4i64:
-; CHECK-ZVBB32: # %bb.0:
-; CHECK-ZVBB32-NEXT: addi sp, sp, -16
-; CHECK-ZVBB32-NEXT: .cfi_def_cfa_offset 16
-; CHECK-ZVBB32-NEXT: sw a0, 12(sp)
-; CHECK-ZVBB32-NEXT: sw a0, 8(sp)
-; CHECK-ZVBB32-NEXT: addi a0, sp, 8
-; CHECK-ZVBB32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-ZVBB32-NEXT: vlse64.v v10, (a0), zero
-; CHECK-ZVBB32-NEXT: vrol.vv v8, v8, v10
-; CHECK-ZVBB32-NEXT: addi sp, sp, 16
-; CHECK-ZVBB32-NEXT: ret
-;
-; CHECK-ZVBB64-LABEL: vrol_vx_v4i64:
-; CHECK-ZVBB64: # %bb.0:
-; CHECK-ZVBB64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-ZVBB64-NEXT: vrol.vx v8, v8, a0
-; CHECK-ZVBB64-NEXT: ret
+; CHECK-ZVBB-LABEL: vrol_vx_v4i64:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vrol.vx v8, v8, a0
+; CHECK-ZVBB-NEXT: ret
%b.head = insertelement <4 x i64> poison, i64 %b, i32 0
%b.splat = shufflevector <4 x i64> %b.head, <4 x i64> poison, <4 x i32> zeroinitializer
%x = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %a, <4 x i64> %a, <4 x i64> %b.splat)
@@ -1216,24 +1157,18 @@ define <8 x i64> @vrol_vv_v8i64(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @vrol_vx_v8i64(<8 x i64> %a, i64 %b) {
; CHECK-RV32-LABEL: vrol_vx_v8i64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: addi sp, sp, -16
-; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16
-; CHECK-RV32-NEXT: sw a0, 12(sp)
-; CHECK-RV32-NEXT: sw a0, 8(sp)
-; CHECK-RV32-NEXT: addi a0, sp, 8
; CHECK-RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-RV32-NEXT: vlse64.v v12, (a0), zero
-; CHECK-RV32-NEXT: li a0, 63
-; CHECK-RV32-NEXT: vand.vx v16, v12, a0
-; CHECK-RV32-NEXT: vsll.vv v16, v8, v16
+; CHECK-RV32-NEXT: vmv.v.x v12, a0
+; CHECK-RV32-NEXT: li a1, 63
+; CHECK-RV32-NEXT: vand.vx v12, v12, a1
+; CHECK-RV32-NEXT: vsll.vv v12, v8, v12
; CHECK-RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-RV32-NEXT: vmv.v.i v20, 0
+; CHECK-RV32-NEXT: vmv.v.i v16, 0
; CHECK-RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-RV32-NEXT: vsub.vv v12, v20, v12
-; CHECK-RV32-NEXT: vand.vx v12, v12, a0
-; CHECK-RV32-NEXT: vsrl.vv v8, v8, v12
-; CHECK-RV32-NEXT: vor.vv v8, v16, v8
-; CHECK-RV32-NEXT: addi sp, sp, 16
+; CHECK-RV32-NEXT: vsub.vx v16, v16, a0
+; CHECK-RV32-NEXT: vand.vx v16, v16, a1
+; CHECK-RV32-NEXT: vsrl.vv v8, v8, v16
+; CHECK-RV32-NEXT: vor.vv v8, v12, v8
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: vrol_vx_v8i64:
@@ -1249,27 +1184,17 @@ define <8 x i64> @vrol_vx_v8i64(<8 x i64> %a, i64 %b) {
; CHECK-RV64-NEXT: vor.vv v8, v16, v8
; CHECK-RV64-NEXT: ret
;
-; CHECK-ZVBB32-LABEL: vrol_vx_v8i64:
-; CHECK-ZVBB32: # %bb.0:
-; CHECK-ZVBB32-NEXT: addi sp, sp, -16
-; CHECK-ZVBB32-NEXT: .cfi_def_cfa_offset 16
-; CHECK-ZVBB32-NEXT: sw a0, 12(sp)
-; CHECK-ZVBB32-NEXT: sw a0, 8(sp)
-; CHECK-ZVBB32-NEXT: addi a0, sp, 8
-; CHECK-ZVBB32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-ZVBB32-NEXT: vlse64.v v12, (a0), zero
-; CHECK-ZVBB32-NEXT: vrol.vv v8, v8, v12
-; CHECK-ZVBB32-NEXT: addi sp, sp, 16
-; CHECK-ZVBB32-NEXT: ret
-;
-; CHECK-ZVBB64-LABEL: vrol_vx_v8i64:
-; CHECK-ZVBB64: # %bb.0:
-; CHECK-ZVBB64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-ZVBB64-NEXT: vrol.vx v8, v8, a0
-; CHECK-ZVBB64-NEXT: ret
+; CHECK-ZVBB-LABEL: vrol_vx_v8i64:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-ZVBB-NEXT: vrol.vx v8, v8, a0
+; CHECK-ZVBB-NEXT: ret
%b.head = insertelement <8 x i64> poison, i64 %b, i32 0
%b.splat = shufflevector <8 x i64> %b.head, <8 x i64> poison, <8 x i32> zeroinitializer
%x = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %a, <8 x i64> %a, <8 x i64> %b.splat)
ret <8 x i64> %x
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK-ZVBB32: {{.*}}
+; CHECK-ZVBB64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vror.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vror.ll
index 0855d4ca5906a6..75758d402d0321 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vror.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vror.ll
@@ -1595,27 +1595,19 @@ define <1 x i64> @vror_vv_v1i64(<1 x i64> %a, <1 x i64> %b) {
define <1 x i64> @vror_vx_v1i64(<1 x i64> %a, i64 %b) {
; CHECK-RV32-LABEL: vror_vx_v1i64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: addi sp, sp, -16
-; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16
-; CHECK-RV32-NEXT: sw a0, 12(sp)
-; CHECK-RV32-NEXT: sw a0, 8(sp)
-; CHECK-RV32-NEXT: addi a0, sp, 8
-; CHECK-RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-RV32-NEXT: vlse64.v v9, (a0), zero
; CHECK-RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-RV32-NEXT: vmv.v.i v10, 0
+; CHECK-RV32-NEXT: vmv.v.i v9, 0
; CHECK-RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-RV32-NEXT: vsub.vv v11, v10, v9
-; CHECK-RV32-NEXT: li a0, 63
+; CHECK-RV32-NEXT: vsub.vx v10, v9, a0
+; CHECK-RV32-NEXT: li a1, 63
; CHECK-RV32-NEXT: vsetvli zero, zero, e32, mf2, tu, ma
-; CHECK-RV32-NEXT: vmv.s.x v10, a0
+; CHECK-RV32-NEXT: vmv.s.x v9, a1
; CHECK-RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-RV32-NEXT: vand.vv v11, v11, v10
-; CHECK-RV32-NEXT: vsll.vv v11, v8, v11
-; CHECK-RV32-NEXT: vand.vv v9, v9, v10
+; CHECK-RV32-NEXT: vand.vv v10, v10, v9
+; CHECK-RV32-NEXT: vsll.vv v10, v8, v10
+; CHECK-RV32-NEXT: vand.vx v9, v9, a0
; CHECK-RV32-NEXT: vsrl.vv v8, v8, v9
-; CHECK-RV32-NEXT: vor.vv v8, v8, v11
-; CHECK-RV32-NEXT: addi sp, sp, 16
+; CHECK-RV32-NEXT: vor.vv v8, v8, v10
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: vror_vx_v1i64:
@@ -1631,24 +1623,11 @@ define <1 x i64> @vror_vx_v1i64(<1 x i64> %a, i64 %b) {
; CHECK-RV64-NEXT: vor.vv v8, v10, v8
; CHECK-RV64-NEXT: ret
;
-; CHECK-ZVBB32-LABEL: vror_vx_v1i64:
-; CHECK-ZVBB32: # %bb.0:
-; CHECK-ZVBB32-NEXT: addi sp, sp, -16
-; CHECK-ZVBB32-NEXT: .cfi_def_cfa_offset 16
-; CHECK-ZVBB32-NEXT: sw a0, 12(sp)
-; CHECK-ZVBB32-NEXT: sw a0, 8(sp)
-; CHECK-ZVBB32-NEXT: addi a0, sp, 8
-; CHECK-ZVBB32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-ZVBB32-NEXT: vlse64.v v9, (a0), zero
-; CHECK-ZVBB32-NEXT: vror.vv v8, v8, v9
-; CHECK-ZVBB32-NEXT: addi sp, sp, 16
-; CHECK-ZVBB32-NEXT: ret
-;
-; CHECK-ZVBB64-LABEL: vror_vx_v1i64:
-; CHECK-ZVBB64: # %bb.0:
-; CHECK-ZVBB64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-ZVBB64-NEXT: vror.vx v8, v8, a0
-; CHECK-ZVBB64-NEXT: ret
+; CHECK-ZVBB-LABEL: vror_vx_v1i64:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-ZVBB-NEXT: vror.vx v8, v8, a0
+; CHECK-ZVBB-NEXT: ret
%b.head = insertelement <1 x i64> poison, i64 %b, i32 0
%b.splat = shufflevector <1 x i64> %b.head, <1 x i64> poison, <1 x i32> zeroinitializer
%x = call <1 x i64> @llvm.fshr.v1i64(<1 x i64> %a, <1 x i64> %a, <1 x i64> %b.splat)
@@ -1772,24 +1751,18 @@ define <2 x i64> @vror_vv_v2i64(<2 x i64> %a, <2 x i64> %b) {
define <2 x i64> @vror_vx_v2i64(<2 x i64> %a, i64 %b) {
; CHECK-RV32-LABEL: vror_vx_v2i64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: addi sp, sp, -16
-; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16
-; CHECK-RV32-NEXT: sw a0, 12(sp)
-; CHECK-RV32-NEXT: sw a0, 8(sp)
-; CHECK-RV32-NEXT: addi a0, sp, 8
; CHECK-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-RV32-NEXT: vlse64.v v9, (a0), zero
-; CHECK-RV32-NEXT: li a0, 63
-; CHECK-RV32-NEXT: vand.vx v10, v9, a0
-; CHECK-RV32-NEXT: vsrl.vv v10, v8, v10
+; CHECK-RV32-NEXT: vmv.v.x v9, a0
+; CHECK-RV32-NEXT: li a1, 63
+; CHECK-RV32-NEXT: vand.vx v9, v9, a1
+; CHECK-RV32-NEXT: vsrl.vv v9, v8, v9
; CHECK-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-RV32-NEXT: vmv.v.i v11, 0
+; CHECK-RV32-NEXT: vmv.v.i v10, 0
; CHECK-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-RV32-NEXT: vsub.vv v9, v11, v9
-; CHECK-RV32-NEXT: vand.vx v9, v9, a0
-; CHECK-RV32-NEXT: vsll.vv v8, v8, v9
-; CHECK-RV32-NEXT: vor.vv v8, v10, v8
-; CHECK-RV32-NEXT: addi sp, sp, 16
+; CHECK-RV32-NEXT: vsub.vx v10, v10, a0
+; CHECK-RV32-NEXT: vand.vx v10, v10, a1
+; CHECK-RV32-NEXT: vsll.vv v8, v8, v10
+; CHECK-RV32-NEXT: vor.vv v8, v9, v8
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: vror_vx_v2i64:
@@ -1805,24 +1778,11 @@ define <2 x i64> @vror_vx_v2i64(<2 x i64> %a, i64 %b) {
; CHECK-RV64-NEXT: vor.vv v8, v10, v8
; CHECK-RV64-NEXT: ret
;
-; CHECK-ZVBB32-LABEL: vror_vx_v2i64:
-; CHECK-ZVBB32: # %bb.0:
-; CHECK-ZVBB32-NEXT: addi sp, sp, -16
-; CHECK-ZVBB32-NEXT: .cfi_def_cfa_offset 16
-; CHECK-ZVBB32-NEXT: sw a0, 12(sp)
-; CHECK-ZVBB32-NEXT: sw a0, 8(sp)
-; CHECK-ZVBB32-NEXT: addi a0, sp, 8
-; CHECK-ZVBB32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-ZVBB32-NEXT: vlse64.v v9, (a0), zero
-; CHECK-ZVBB32-NEXT: vror.vv v8, v8, v9
-; CHECK-ZVBB32-NEXT: addi sp, sp, 16
-; CHECK-ZVBB32-NEXT: ret
-;
-; CHECK-ZVBB64-LABEL: vror_vx_v2i64:
-; CHECK-ZVBB64: # %bb.0:
-; CHECK-ZVBB64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-ZVBB64-NEXT: vror.vx v8, v8, a0
-; CHECK-ZVBB64-NEXT: ret
+; CHECK-ZVBB-LABEL: vror_vx_v2i64:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-ZVBB-NEXT: vror.vx v8, v8, a0
+; CHECK-ZVBB-NEXT: ret
%b.head = insertelement <2 x i64> poison, i64 %b, i32 0
%b.splat = shufflevector <2 x i64> %b.head, <2 x i64> poison, <2 x i32> zeroinitializer
%x = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %a, <2 x i64> %a, <2 x i64> %b.splat)
@@ -1944,24 +1904,18 @@ define <4 x i64> @vror_vv_v4i64(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @vror_vx_v4i64(<4 x i64> %a, i64 %b) {
; CHECK-RV32-LABEL: vror_vx_v4i64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: addi sp, sp, -16
-; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16
-; CHECK-RV32-NEXT: sw a0, 12(sp)
-; CHECK-RV32-NEXT: sw a0, 8(sp)
-; CHECK-RV32-NEXT: addi a0, sp, 8
; CHECK-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-RV32-NEXT: vlse64.v v10, (a0), zero
-; CHECK-RV32-NEXT: li a0, 63
-; CHECK-RV32-NEXT: vand.vx v12, v10, a0
-; CHECK-RV32-NEXT: vsrl.vv v12, v8, v12
+; CHECK-RV32-NEXT: vmv.v.x v10, a0
+; CHECK-RV32-NEXT: li a1, 63
+; CHECK-RV32-NEXT: vand.vx v10, v10, a1
+; CHECK-RV32-NEXT: vsrl.vv v10, v8, v10
; CHECK-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-RV32-NEXT: vmv.v.i v14, 0
+; CHECK-RV32-NEXT: vmv.v.i v12, 0
; CHECK-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-RV32-NEXT: vsub.vv v10, v14, v10
-; CHECK-RV32-NEXT: vand.vx v10, v10, a0
-; CHECK-RV32-NEXT: vsll.vv v8, v8, v10
-; CHECK-RV32-NEXT: vor.vv v8, v12, v8
-; CHECK-RV32-NEXT: addi sp, sp, 16
+; CHECK-RV32-NEXT: vsub.vx v12, v12, a0
+; CHECK-RV32-NEXT: vand.vx v12, v12, a1
+; CHECK-RV32-NEXT: vsll.vv v8, v8, v12
+; CHECK-RV32-NEXT: vor.vv v8, v10, v8
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: vror_vx_v4i64:
@@ -1977,24 +1931,11 @@ define <4 x i64> @vror_vx_v4i64(<4 x i64> %a, i64 %b) {
; CHECK-RV64-NEXT: vor.vv v8, v12, v8
; CHECK-RV64-NEXT: ret
;
-; CHECK-ZVBB32-LABEL: vror_vx_v4i64:
-; CHECK-ZVBB32: # %bb.0:
-; CHECK-ZVBB32-NEXT: addi sp, sp, -16
-; CHECK-ZVBB32-NEXT: .cfi_def_cfa_offset 16
-; CHECK-ZVBB32-NEXT: sw a0, 12(sp)
-; CHECK-ZVBB32-NEXT: sw a0, 8(sp)
-; CHECK-ZVBB32-NEXT: addi a0, sp, 8
-; CHECK-ZVBB32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-ZVBB32-NEXT: vlse64.v v10, (a0), zero
-; CHECK-ZVBB32-NEXT: vror.vv v8, v8, v10
-; CHECK-ZVBB32-NEXT: addi sp, sp, 16
-; CHECK-ZVBB32-NEXT: ret
-;
-; CHECK-ZVBB64-LABEL: vror_vx_v4i64:
-; CHECK-ZVBB64: # %bb.0:
-; CHECK-ZVBB64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-ZVBB64-NEXT: vror.vx v8, v8, a0
-; CHECK-ZVBB64-NEXT: ret
+; CHECK-ZVBB-LABEL: vror_vx_v4i64:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT: vror.vx v8, v8, a0
+; CHECK-ZVBB-NEXT: ret
%b.head = insertelement <4 x i64> poison, i64 %b, i32 0
%b.splat = shufflevector <4 x i64> %b.head, <4 x i64> poison, <4 x i32> zeroinitializer
%x = call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %a, <4 x i64> %a, <4 x i64> %b.splat)
@@ -2116,24 +2057,18 @@ define <8 x i64> @vror_vv_v8i64(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @vror_vx_v8i64(<8 x i64> %a, i64 %b) {
; CHECK-RV32-LABEL: vror_vx_v8i64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: addi sp, sp, -16
-; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16
-; CHECK-RV32-NEXT: sw a0, 12(sp)
-; CHECK-RV32-NEXT: sw a0, 8(sp)
-; CHECK-RV32-NEXT: addi a0, sp, 8
; CHECK-RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-RV32-NEXT: vlse64.v v12, (a0), zero
-; CHECK-RV32-NEXT: li a0, 63
-; CHECK-RV32-NEXT: vand.vx v16, v12, a0
-; CHECK-RV32-NEXT: vsrl.vv v16, v8, v16
+; CHECK-RV32-NEXT: vmv.v.x v12, a0
+; CHECK-RV32-NEXT: li a1, 63
+; CHECK-RV32-NEXT: vand.vx v12, v12, a1
+; CHECK-RV32-NEXT: vsrl.vv v12, v8, v12
; CHECK-RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-RV32-NEXT: vmv.v.i v20, 0
+; CHECK-RV32-NEXT: vmv.v.i v16, 0
; CHECK-RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-RV32-NEXT: vsub.vv v12, v20, v12
-; CHECK-RV32-NEXT: vand.vx v12, v12, a0
-; CHECK-RV32-NEXT: vsll.vv v8, v8, v12
-; CHECK-RV32-NEXT: vor.vv v8, v16, v8
-; CHECK-RV32-NEXT: addi sp, sp, 16
+; CHECK-RV32-NEXT: vsub.vx v16, v16, a0
+; CHECK-RV32-NEXT: vand.vx v16, v16, a1
+; CHECK-RV32-NEXT: vsll.vv v8, v8, v16
+; CHECK-RV32-NEXT: vor.vv v8, v12, v8
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: vror_vx_v8i64:
@@ -2149,24 +2084,11 @@ define <8 x i64> @vror_vx_v8i64(<8 x i64> %a, i64 %b) {
; CHECK-RV64-NEXT: vor.vv v8, v16, v8
; CHECK-RV64-NEXT: ret
;
-; CHECK-ZVBB32-LABEL: vror_vx_v8i64:
-; CHECK-ZVBB32: # %bb.0:
-; CHECK-ZVBB32-NEXT: addi sp, sp, -16
-; CHECK-ZVBB32-NEXT: .cfi_def_cfa_offset 16
-; CHECK-ZVBB32-NEXT: sw a0, 12(sp)
-; CHECK-ZVBB32-NEXT: sw a0, 8(sp)
-; CHECK-ZVBB32-NEXT: addi a0, sp, 8
-; CHECK-ZVBB32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-ZVBB32-NEXT: vlse64.v v12, (a0), zero
-; CHECK-ZVBB32-NEXT: vror.vv v8, v8, v12
-; CHECK-ZVBB32-NEXT: addi sp, sp, 16
-; CHECK-ZVBB32-NEXT: ret
-;
-; CHECK-ZVBB64-LABEL: vror_vx_v8i64:
-; CHECK-ZVBB64: # %bb.0:
-; CHECK-ZVBB64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-ZVBB64-NEXT: vror.vx v8, v8, a0
-; CHECK-ZVBB64-NEXT: ret
+; CHECK-ZVBB-LABEL: vror_vx_v8i64:
+; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-ZVBB-NEXT: vror.vx v8, v8, a0
+; CHECK-ZVBB-NEXT: ret
%b.head = insertelement <8 x i64> poison, i64 %b, i32 0
%b.splat = shufflevector <8 x i64> %b.head, <8 x i64> poison, <8 x i32> zeroinitializer
%x = call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %a, <8 x i64> %a, <8 x i64> %b.splat)
@@ -2245,3 +2167,6 @@ define <8 x i64> @vror_vi_rotl_v8i64(<8 x i64> %a) {
ret <8 x i64> %x
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK-ZVBB32: {{.*}}
+; CHECK-ZVBB64: {{.*}}