[llvm] c3eb2da - [RISCV] Optimize sign-extended EXTRACT_VECTOR_ELT nodes
Fraser Cormack via llvm-commits
llvm-commits at lists.llvm.org
Fri Feb 5 02:12:12 PST 2021
Author: Fraser Cormack
Date: 2021-02-05T10:05:22Z
New Revision: c3eb2da6c411ace8e466e329cf6b8de58e711dea
URL: https://github.com/llvm/llvm-project/commit/c3eb2da6c411ace8e466e329cf6b8de58e711dea
DIFF: https://github.com/llvm/llvm-project/commit/c3eb2da6c411ace8e466e329cf6b8de58e711dea.diff
LOG: [RISCV] Optimize sign-extended EXTRACT_VECTOR_ELT nodes
This patch custom-legalizes all integer EXTRACT_VECTOR_ELT nodes where
SEW < XLEN to VMV_X_S nodes to help the compiler infer sign bits from
the result. This allows us to eliminate redundant sign extensions.
For parity, all integer EXTRACT_VECTOR_ELT nodes are legalized this way
so that we don't need TableGen patterns for some and not others.
Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D95741
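As a quick illustration (a minimal sketch distilled from the extractelt-int tests updated below; the function name is illustrative), consider an index-0 extract whose i8 result is returned signext. Before this change the RV32 output re-sign-extended the vmv.x.s result with an slli/srai pair; since VMV_X_S is already sign-extended from the element width, that pair is now recognized as redundant and dropped, as the test diffs show:

; Mirrors extractelt_nxv1i8_0 in llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
define signext i8 @extract_elt0_sext(<vscale x 1 x i8> %v) {
  %r = extractelement <vscale x 1 x i8> %v, i32 0
  ret i8 %r
}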
Added:
Modified:
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/lib/Target/RISCV/RISCVISelLowering.h
llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 40d6c04cd0ae..9e914881c020 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1480,23 +1480,32 @@ SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
}
// Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
-// extract the first element: (extractelt (slidedown vec, idx), 0). This is
-// done to maintain partity with the legalization of RV32 vXi64 legalization.
+// extract the first element: (extractelt (slidedown vec, idx), 0). For integer
+// types this is done using VMV_X_S to allow us to glean information about the
+// sign bits of the result.
SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
SDValue Idx = Op.getOperand(1);
- if (isNullConstant(Idx))
- return Op;
-
SDValue Vec = Op.getOperand(0);
EVT EltVT = Op.getValueType();
EVT VecVT = Vec.getValueType();
- SDValue Slidedown = DAG.getNode(RISCVISD::VSLIDEDOWN, DL, VecVT,
- DAG.getUNDEF(VecVT), Vec, Idx);
+ MVT XLenVT = Subtarget.getXLenVT();
+
+ // If the index is 0, the vector is already in the right position.
+ if (!isNullConstant(Idx)) {
+ Vec = DAG.getNode(RISCVISD::VSLIDEDOWN, DL, VecVT, DAG.getUNDEF(VecVT), Vec,
+ Idx);
+ }
+
+ if (!EltVT.isInteger()) {
+ // Floating-point extracts are handled in TableGen.
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
+ DAG.getConstant(0, DL, XLenVT));
+ }
- return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Slidedown,
- DAG.getConstant(0, DL, Subtarget.getXLenVT()));
+ SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
+ return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
}
SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index b4b948c32c76..82a3f90ac08d 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -89,9 +89,8 @@ enum NodeType : unsigned {
GORCI,
GORCIW,
// Vector Extension
- // VMV_X_S matches the semantics of vmv.x.s. The result is always XLenVT
- // sign extended from the vector element size. NOTE: The result size will
- // never be less than the vector element size.
+ // VMV_X_S matches the semantics of vmv.x.s. The result is always XLenVT sign
+ // extended from the vector element size.
VMV_X_S,
// Splats an i64 scalar to a vector type (with element type i64) where the
// scalar is a sign-extended i32.
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index 09b33f63c2dc..76033ed567fe 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -726,53 +726,47 @@ foreach fvti = AllFloatVectors in {
// Vector Element Inserts/Extracts
//===----------------------------------------------------------------------===//
-// The built-in TableGen 'extractelt' and 'insertelt' nodes must return the
-// same type as the vector element type. On RISC-V, XLenVT is the only legal
-// integer type, so for integer inserts/extracts we use a custom node which
-// returns XLenVT.
+// The built-in TableGen 'insertelt' node must return the same type as the
+// vector element type. On RISC-V, XLenVT is the only legal integer type, so
+// for integer inserts we use a custom node which inserts an XLenVT-typed
+// value.
def riscv_insert_vector_elt
: SDNode<"ISD::INSERT_VECTOR_ELT",
SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisVT<2, XLenVT>,
SDTCisPtrTy<3>]>, []>;
-def riscv_extract_vector_elt
- : SDNode<"ISD::EXTRACT_VECTOR_ELT",
- SDTypeProfile<1, 2, [SDTCisVT<0, XLenVT>, SDTCisPtrTy<2>]>, []>;
-
-multiclass VPatInsertExtractElt_XI_Idx<bit IsFloat> {
- defvar vtilist = !if(IsFloat, AllFloatVectors, AllIntegerVectors);
- defvar insertelt_node = !if(IsFloat, insertelt, riscv_insert_vector_elt);
- defvar extractelt_node = !if(IsFloat, extractelt, riscv_extract_vector_elt);
- foreach vti = vtilist in {
- defvar MX = vti.LMul.MX;
- defvar vmv_xf_s_inst = !cast<Instruction>(!strconcat("PseudoV",
- !if(IsFloat, "F", ""),
- "MV_",
- vti.ScalarSuffix,
- "_S_", MX));
- defvar vmv_s_xf_inst = !cast<Instruction>(!strconcat("PseudoV",
- !if(IsFloat, "F", ""),
- "MV_S_",
- vti.ScalarSuffix,
- "_", MX));
- // Only pattern-match insert/extract-element operations where the index is
- // 0. Any other index will have been custom-lowered to slide the vector
- // correctly into place (and, in the case of insert, slide it back again
- // afterwards).
- def : Pat<(vti.Scalar (extractelt_node (vti.Vector vti.RegClass:$rs2), 0)),
- (vmv_xf_s_inst vti.RegClass:$rs2, vti.SEW)>;
-
- def : Pat<(vti.Vector (insertelt_node (vti.Vector vti.RegClass:$merge),
- vti.ScalarRegClass:$rs1, 0)),
- (vmv_s_xf_inst vti.RegClass:$merge,
- (vti.Scalar vti.ScalarRegClass:$rs1),
- vti.AVL, vti.SEW)>;
- }
-}
let Predicates = [HasStdExtV] in
-defm "" : VPatInsertExtractElt_XI_Idx</*IsFloat*/0>;
+foreach vti = AllIntegerVectors in {
+ def : Pat<(vti.Vector (riscv_insert_vector_elt (vti.Vector vti.RegClass:$merge),
+ vti.ScalarRegClass:$rs1, 0)),
+ (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX)
+ vti.RegClass:$merge,
+ (vti.Scalar vti.ScalarRegClass:$rs1),
+ vti.AVL, vti.SEW)>;
+}
+
let Predicates = [HasStdExtV, HasStdExtF] in
-defm "" : VPatInsertExtractElt_XI_Idx</*IsFloat*/1>;
+foreach vti = AllFloatVectors in {
+ defvar MX = vti.LMul.MX;
+ defvar vmv_f_s_inst = !cast<Instruction>(!strconcat("PseudoVFMV_",
+ vti.ScalarSuffix,
+ "_S_", MX));
+ defvar vmv_s_f_inst = !cast<Instruction>(!strconcat("PseudoVFMV_S_",
+ vti.ScalarSuffix,
+ "_", vti.LMul.MX));
+ // Only pattern-match insert/extract-element operations where the index is
+ // 0. Any other index will have been custom-lowered to slide the vector
+ // correctly into place (and, in the case of insert, slide it back again
+ // afterwards).
+ def : Pat<(vti.Scalar (extractelt (vti.Vector vti.RegClass:$rs2), 0)),
+ (vmv_f_s_inst vti.RegClass:$rs2, vti.SEW)>;
+
+ def : Pat<(vti.Vector (insertelt (vti.Vector vti.RegClass:$merge),
+ vti.ScalarRegClass:$rs1, 0)),
+ (vmv_s_f_inst vti.RegClass:$merge,
+ (vti.Scalar vti.ScalarRegClass:$rs1),
+ vti.AVL, vti.SEW)>;
+}
//===----------------------------------------------------------------------===//
// Miscellaneous RISCVISD SDNodes
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
index 6c7dd3e14349..5354e542ebbf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
@@ -7,8 +7,6 @@ define signext i8 @extractelt_nxv1i8_0(<vscale x 1 x i8> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 24
-; CHECK-NEXT: srai a0, a0, 24
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i8> %v, i32 0
ret i8 %r
@@ -20,8 +18,6 @@ define signext i8 @extractelt_nxv1i8_imm(<vscale x 1 x i8> %v) {
; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 24
-; CHECK-NEXT: srai a0, a0, 24
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i8> %v, i32 2
ret i8 %r
@@ -33,8 +29,6 @@ define signext i8 @extractelt_nxv1i8_idx(<vscale x 1 x i8> %v, i32 %idx) {
; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 24
-; CHECK-NEXT: srai a0, a0, 24
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i8> %v, i32 %idx
ret i8 %r
@@ -45,8 +39,6 @@ define signext i8 @extractelt_nxv2i8_0(<vscale x 2 x i8> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 24
-; CHECK-NEXT: srai a0, a0, 24
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x i8> %v, i32 0
ret i8 %r
@@ -58,8 +50,6 @@ define signext i8 @extractelt_nxv2i8_imm(<vscale x 2 x i8> %v) {
; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 24
-; CHECK-NEXT: srai a0, a0, 24
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x i8> %v, i32 2
ret i8 %r
@@ -71,8 +61,6 @@ define signext i8 @extractelt_nxv2i8_idx(<vscale x 2 x i8> %v, i32 %idx) {
; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 24
-; CHECK-NEXT: srai a0, a0, 24
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x i8> %v, i32 %idx
ret i8 %r
@@ -83,8 +71,6 @@ define signext i8 @extractelt_nxv4i8_0(<vscale x 4 x i8> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 24
-; CHECK-NEXT: srai a0, a0, 24
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x i8> %v, i32 0
ret i8 %r
@@ -96,8 +82,6 @@ define signext i8 @extractelt_nxv4i8_imm(<vscale x 4 x i8> %v) {
; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 24
-; CHECK-NEXT: srai a0, a0, 24
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x i8> %v, i32 2
ret i8 %r
@@ -109,8 +93,6 @@ define signext i8 @extractelt_nxv4i8_idx(<vscale x 4 x i8> %v, i32 %idx) {
; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 24
-; CHECK-NEXT: srai a0, a0, 24
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x i8> %v, i32 %idx
ret i8 %r
@@ -121,8 +103,6 @@ define signext i8 @extractelt_nxv8i8_0(<vscale x 8 x i8> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 24
-; CHECK-NEXT: srai a0, a0, 24
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x i8> %v, i32 0
ret i8 %r
@@ -134,8 +114,6 @@ define signext i8 @extractelt_nxv8i8_imm(<vscale x 8 x i8> %v) {
; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 24
-; CHECK-NEXT: srai a0, a0, 24
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x i8> %v, i32 2
ret i8 %r
@@ -147,8 +125,6 @@ define signext i8 @extractelt_nxv8i8_idx(<vscale x 8 x i8> %v, i32 %idx) {
; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 24
-; CHECK-NEXT: srai a0, a0, 24
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x i8> %v, i32 %idx
ret i8 %r
@@ -159,8 +135,6 @@ define signext i8 @extractelt_nxv16i8_0(<vscale x 16 x i8> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 24
-; CHECK-NEXT: srai a0, a0, 24
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i8> %v, i32 0
ret i8 %r
@@ -172,8 +146,6 @@ define signext i8 @extractelt_nxv16i8_imm(<vscale x 16 x i8> %v) {
; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu
; CHECK-NEXT: vslidedown.vi v26, v8, 2
; CHECK-NEXT: vmv.x.s a0, v26
-; CHECK-NEXT: slli a0, a0, 24
-; CHECK-NEXT: srai a0, a0, 24
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i8> %v, i32 2
ret i8 %r
@@ -185,8 +157,6 @@ define signext i8 @extractelt_nxv16i8_idx(<vscale x 16 x i8> %v, i32 %idx) {
; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu
; CHECK-NEXT: vslidedown.vx v26, v8, a0
; CHECK-NEXT: vmv.x.s a0, v26
-; CHECK-NEXT: slli a0, a0, 24
-; CHECK-NEXT: srai a0, a0, 24
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i8> %v, i32 %idx
ret i8 %r
@@ -197,8 +167,6 @@ define signext i8 @extractelt_nxv32i8_0(<vscale x 32 x i8> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8,m4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 24
-; CHECK-NEXT: srai a0, a0, 24
; CHECK-NEXT: ret
%r = extractelement <vscale x 32 x i8> %v, i32 0
ret i8 %r
@@ -210,8 +178,6 @@ define signext i8 @extractelt_nxv32i8_imm(<vscale x 32 x i8> %v) {
; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu
; CHECK-NEXT: vslidedown.vi v28, v8, 2
; CHECK-NEXT: vmv.x.s a0, v28
-; CHECK-NEXT: slli a0, a0, 24
-; CHECK-NEXT: srai a0, a0, 24
; CHECK-NEXT: ret
%r = extractelement <vscale x 32 x i8> %v, i32 2
ret i8 %r
@@ -223,8 +189,6 @@ define signext i8 @extractelt_nxv32i8_idx(<vscale x 32 x i8> %v, i32 %idx) {
; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu
; CHECK-NEXT: vslidedown.vx v28, v8, a0
; CHECK-NEXT: vmv.x.s a0, v28
-; CHECK-NEXT: slli a0, a0, 24
-; CHECK-NEXT: srai a0, a0, 24
; CHECK-NEXT: ret
%r = extractelement <vscale x 32 x i8> %v, i32 %idx
ret i8 %r
@@ -235,8 +199,6 @@ define signext i8 @extractelt_nxv64i8_0(<vscale x 64 x i8> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8,m8,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 24
-; CHECK-NEXT: srai a0, a0, 24
; CHECK-NEXT: ret
%r = extractelement <vscale x 64 x i8> %v, i32 0
ret i8 %r
@@ -248,8 +210,6 @@ define signext i8 @extractelt_nxv64i8_imm(<vscale x 64 x i8> %v) {
; CHECK-NEXT: vsetvli a0, zero, e8,m8,ta,mu
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 24
-; CHECK-NEXT: srai a0, a0, 24
; CHECK-NEXT: ret
%r = extractelement <vscale x 64 x i8> %v, i32 2
ret i8 %r
@@ -261,8 +221,6 @@ define signext i8 @extractelt_nxv64i8_idx(<vscale x 64 x i8> %v, i32 %idx) {
; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 24
-; CHECK-NEXT: srai a0, a0, 24
; CHECK-NEXT: ret
%r = extractelement <vscale x 64 x i8> %v, i32 %idx
ret i8 %r
@@ -273,8 +231,6 @@ define signext i16 @extractelt_nxv1i16_0(<vscale x 1 x i16> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 16
-; CHECK-NEXT: srai a0, a0, 16
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i16> %v, i32 0
ret i16 %r
@@ -286,8 +242,6 @@ define signext i16 @extractelt_nxv1i16_imm(<vscale x 1 x i16> %v) {
; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 16
-; CHECK-NEXT: srai a0, a0, 16
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i16> %v, i32 2
ret i16 %r
@@ -299,8 +253,6 @@ define signext i16 @extractelt_nxv1i16_idx(<vscale x 1 x i16> %v, i32 %idx) {
; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 16
-; CHECK-NEXT: srai a0, a0, 16
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i16> %v, i32 %idx
ret i16 %r
@@ -311,8 +263,6 @@ define signext i16 @extractelt_nxv2i16_0(<vscale x 2 x i16> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 16
-; CHECK-NEXT: srai a0, a0, 16
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x i16> %v, i32 0
ret i16 %r
@@ -324,8 +274,6 @@ define signext i16 @extractelt_nxv2i16_imm(<vscale x 2 x i16> %v) {
; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 16
-; CHECK-NEXT: srai a0, a0, 16
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x i16> %v, i32 2
ret i16 %r
@@ -337,8 +285,6 @@ define signext i16 @extractelt_nxv2i16_idx(<vscale x 2 x i16> %v, i32 %idx) {
; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 16
-; CHECK-NEXT: srai a0, a0, 16
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x i16> %v, i32 %idx
ret i16 %r
@@ -349,8 +295,6 @@ define signext i16 @extractelt_nxv4i16_0(<vscale x 4 x i16> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 16
-; CHECK-NEXT: srai a0, a0, 16
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x i16> %v, i32 0
ret i16 %r
@@ -362,8 +306,6 @@ define signext i16 @extractelt_nxv4i16_imm(<vscale x 4 x i16> %v) {
; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 16
-; CHECK-NEXT: srai a0, a0, 16
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x i16> %v, i32 2
ret i16 %r
@@ -375,8 +317,6 @@ define signext i16 @extractelt_nxv4i16_idx(<vscale x 4 x i16> %v, i32 %idx) {
; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 16
-; CHECK-NEXT: srai a0, a0, 16
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x i16> %v, i32 %idx
ret i16 %r
@@ -387,8 +327,6 @@ define signext i16 @extractelt_nxv8i16_0(<vscale x 8 x i16> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 16
-; CHECK-NEXT: srai a0, a0, 16
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x i16> %v, i32 0
ret i16 %r
@@ -400,8 +338,6 @@ define signext i16 @extractelt_nxv8i16_imm(<vscale x 8 x i16> %v) {
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
; CHECK-NEXT: vslidedown.vi v26, v8, 2
; CHECK-NEXT: vmv.x.s a0, v26
-; CHECK-NEXT: slli a0, a0, 16
-; CHECK-NEXT: srai a0, a0, 16
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x i16> %v, i32 2
ret i16 %r
@@ -413,8 +349,6 @@ define signext i16 @extractelt_nxv8i16_idx(<vscale x 8 x i16> %v, i32 %idx) {
; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
; CHECK-NEXT: vslidedown.vx v26, v8, a0
; CHECK-NEXT: vmv.x.s a0, v26
-; CHECK-NEXT: slli a0, a0, 16
-; CHECK-NEXT: srai a0, a0, 16
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x i16> %v, i32 %idx
ret i16 %r
@@ -425,8 +359,6 @@ define signext i16 @extractelt_nxv16i16_0(<vscale x 16 x i16> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 16
-; CHECK-NEXT: srai a0, a0, 16
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i16> %v, i32 0
ret i16 %r
@@ -438,8 +370,6 @@ define signext i16 @extractelt_nxv16i16_imm(<vscale x 16 x i16> %v) {
; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu
; CHECK-NEXT: vslidedown.vi v28, v8, 2
; CHECK-NEXT: vmv.x.s a0, v28
-; CHECK-NEXT: slli a0, a0, 16
-; CHECK-NEXT: srai a0, a0, 16
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i16> %v, i32 2
ret i16 %r
@@ -451,8 +381,6 @@ define signext i16 @extractelt_nxv16i16_idx(<vscale x 16 x i16> %v, i32 %idx) {
; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu
; CHECK-NEXT: vslidedown.vx v28, v8, a0
; CHECK-NEXT: vmv.x.s a0, v28
-; CHECK-NEXT: slli a0, a0, 16
-; CHECK-NEXT: srai a0, a0, 16
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i16> %v, i32 %idx
ret i16 %r
@@ -463,8 +391,6 @@ define signext i16 @extractelt_nxv32i16_0(<vscale x 32 x i16> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16,m8,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 16
-; CHECK-NEXT: srai a0, a0, 16
; CHECK-NEXT: ret
%r = extractelement <vscale x 32 x i16> %v, i32 0
ret i16 %r
@@ -476,8 +402,6 @@ define signext i16 @extractelt_nxv32i16_imm(<vscale x 32 x i16> %v) {
; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 16
-; CHECK-NEXT: srai a0, a0, 16
; CHECK-NEXT: ret
%r = extractelement <vscale x 32 x i16> %v, i32 2
ret i16 %r
@@ -489,8 +413,6 @@ define signext i16 @extractelt_nxv32i16_idx(<vscale x 32 x i16> %v, i32 %idx) {
; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 16
-; CHECK-NEXT: srai a0, a0, 16
; CHECK-NEXT: ret
%r = extractelement <vscale x 32 x i16> %v, i32 %idx
ret i16 %r
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
index 6a5e32399c63..af003b3fe4e8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
@@ -7,8 +7,6 @@ define signext i8 @extractelt_nxv1i8_0(<vscale x 1 x i8> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 56
-; CHECK-NEXT: srai a0, a0, 56
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i8> %v, i32 0
ret i8 %r
@@ -20,8 +18,6 @@ define signext i8 @extractelt_nxv1i8_imm(<vscale x 1 x i8> %v) {
; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 56
-; CHECK-NEXT: srai a0, a0, 56
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i8> %v, i32 2
ret i8 %r
@@ -33,8 +29,6 @@ define signext i8 @extractelt_nxv1i8_idx(<vscale x 1 x i8> %v, i32 signext %idx)
; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 56
-; CHECK-NEXT: srai a0, a0, 56
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i8> %v, i32 %idx
ret i8 %r
@@ -45,8 +39,6 @@ define signext i8 @extractelt_nxv2i8_0(<vscale x 2 x i8> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 56
-; CHECK-NEXT: srai a0, a0, 56
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x i8> %v, i32 0
ret i8 %r
@@ -58,8 +50,6 @@ define signext i8 @extractelt_nxv2i8_imm(<vscale x 2 x i8> %v) {
; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 56
-; CHECK-NEXT: srai a0, a0, 56
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x i8> %v, i32 2
ret i8 %r
@@ -71,8 +61,6 @@ define signext i8 @extractelt_nxv2i8_idx(<vscale x 2 x i8> %v, i32 signext %idx)
; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 56
-; CHECK-NEXT: srai a0, a0, 56
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x i8> %v, i32 %idx
ret i8 %r
@@ -83,8 +71,6 @@ define signext i8 @extractelt_nxv4i8_0(<vscale x 4 x i8> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 56
-; CHECK-NEXT: srai a0, a0, 56
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x i8> %v, i32 0
ret i8 %r
@@ -96,8 +82,6 @@ define signext i8 @extractelt_nxv4i8_imm(<vscale x 4 x i8> %v) {
; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 56
-; CHECK-NEXT: srai a0, a0, 56
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x i8> %v, i32 2
ret i8 %r
@@ -109,8 +93,6 @@ define signext i8 @extractelt_nxv4i8_idx(<vscale x 4 x i8> %v, i32 signext %idx)
; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 56
-; CHECK-NEXT: srai a0, a0, 56
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x i8> %v, i32 %idx
ret i8 %r
@@ -121,8 +103,6 @@ define signext i8 @extractelt_nxv8i8_0(<vscale x 8 x i8> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 56
-; CHECK-NEXT: srai a0, a0, 56
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x i8> %v, i32 0
ret i8 %r
@@ -134,8 +114,6 @@ define signext i8 @extractelt_nxv8i8_imm(<vscale x 8 x i8> %v) {
; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 56
-; CHECK-NEXT: srai a0, a0, 56
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x i8> %v, i32 2
ret i8 %r
@@ -147,8 +125,6 @@ define signext i8 @extractelt_nxv8i8_idx(<vscale x 8 x i8> %v, i32 signext %idx)
; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 56
-; CHECK-NEXT: srai a0, a0, 56
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x i8> %v, i32 %idx
ret i8 %r
@@ -159,8 +135,6 @@ define signext i8 @extractelt_nxv16i8_0(<vscale x 16 x i8> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 56
-; CHECK-NEXT: srai a0, a0, 56
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i8> %v, i32 0
ret i8 %r
@@ -172,8 +146,6 @@ define signext i8 @extractelt_nxv16i8_imm(<vscale x 16 x i8> %v) {
; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu
; CHECK-NEXT: vslidedown.vi v26, v8, 2
; CHECK-NEXT: vmv.x.s a0, v26
-; CHECK-NEXT: slli a0, a0, 56
-; CHECK-NEXT: srai a0, a0, 56
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i8> %v, i32 2
ret i8 %r
@@ -185,8 +157,6 @@ define signext i8 @extractelt_nxv16i8_idx(<vscale x 16 x i8> %v, i32 signext %id
; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu
; CHECK-NEXT: vslidedown.vx v26, v8, a0
; CHECK-NEXT: vmv.x.s a0, v26
-; CHECK-NEXT: slli a0, a0, 56
-; CHECK-NEXT: srai a0, a0, 56
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i8> %v, i32 %idx
ret i8 %r
@@ -197,8 +167,6 @@ define signext i8 @extractelt_nxv32i8_0(<vscale x 32 x i8> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8,m4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 56
-; CHECK-NEXT: srai a0, a0, 56
; CHECK-NEXT: ret
%r = extractelement <vscale x 32 x i8> %v, i32 0
ret i8 %r
@@ -210,8 +178,6 @@ define signext i8 @extractelt_nxv32i8_imm(<vscale x 32 x i8> %v) {
; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu
; CHECK-NEXT: vslidedown.vi v28, v8, 2
; CHECK-NEXT: vmv.x.s a0, v28
-; CHECK-NEXT: slli a0, a0, 56
-; CHECK-NEXT: srai a0, a0, 56
; CHECK-NEXT: ret
%r = extractelement <vscale x 32 x i8> %v, i32 2
ret i8 %r
@@ -223,8 +189,6 @@ define signext i8 @extractelt_nxv32i8_idx(<vscale x 32 x i8> %v, i32 signext %id
; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu
; CHECK-NEXT: vslidedown.vx v28, v8, a0
; CHECK-NEXT: vmv.x.s a0, v28
-; CHECK-NEXT: slli a0, a0, 56
-; CHECK-NEXT: srai a0, a0, 56
; CHECK-NEXT: ret
%r = extractelement <vscale x 32 x i8> %v, i32 %idx
ret i8 %r
@@ -235,8 +199,6 @@ define signext i8 @extractelt_nxv64i8_0(<vscale x 64 x i8> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8,m8,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 56
-; CHECK-NEXT: srai a0, a0, 56
; CHECK-NEXT: ret
%r = extractelement <vscale x 64 x i8> %v, i32 0
ret i8 %r
@@ -248,8 +210,6 @@ define signext i8 @extractelt_nxv64i8_imm(<vscale x 64 x i8> %v) {
; CHECK-NEXT: vsetvli a0, zero, e8,m8,ta,mu
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 56
-; CHECK-NEXT: srai a0, a0, 56
; CHECK-NEXT: ret
%r = extractelement <vscale x 64 x i8> %v, i32 2
ret i8 %r
@@ -261,8 +221,6 @@ define signext i8 @extractelt_nxv64i8_idx(<vscale x 64 x i8> %v, i32 signext %id
; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 56
-; CHECK-NEXT: srai a0, a0, 56
; CHECK-NEXT: ret
%r = extractelement <vscale x 64 x i8> %v, i32 %idx
ret i8 %r
@@ -273,8 +231,6 @@ define signext i16 @extractelt_nxv1i16_0(<vscale x 1 x i16> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 48
-; CHECK-NEXT: srai a0, a0, 48
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i16> %v, i32 0
ret i16 %r
@@ -286,8 +242,6 @@ define signext i16 @extractelt_nxv1i16_imm(<vscale x 1 x i16> %v) {
; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 48
-; CHECK-NEXT: srai a0, a0, 48
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i16> %v, i32 2
ret i16 %r
@@ -299,8 +253,6 @@ define signext i16 @extractelt_nxv1i16_idx(<vscale x 1 x i16> %v, i32 signext %i
; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 48
-; CHECK-NEXT: srai a0, a0, 48
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i16> %v, i32 %idx
ret i16 %r
@@ -311,8 +263,6 @@ define signext i16 @extractelt_nxv2i16_0(<vscale x 2 x i16> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 48
-; CHECK-NEXT: srai a0, a0, 48
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x i16> %v, i32 0
ret i16 %r
@@ -324,8 +274,6 @@ define signext i16 @extractelt_nxv2i16_imm(<vscale x 2 x i16> %v) {
; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 48
-; CHECK-NEXT: srai a0, a0, 48
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x i16> %v, i32 2
ret i16 %r
@@ -337,8 +285,6 @@ define signext i16 @extractelt_nxv2i16_idx(<vscale x 2 x i16> %v, i32 signext %i
; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 48
-; CHECK-NEXT: srai a0, a0, 48
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x i16> %v, i32 %idx
ret i16 %r
@@ -349,8 +295,6 @@ define signext i16 @extractelt_nxv4i16_0(<vscale x 4 x i16> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 48
-; CHECK-NEXT: srai a0, a0, 48
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x i16> %v, i32 0
ret i16 %r
@@ -362,8 +306,6 @@ define signext i16 @extractelt_nxv4i16_imm(<vscale x 4 x i16> %v) {
; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 48
-; CHECK-NEXT: srai a0, a0, 48
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x i16> %v, i32 2
ret i16 %r
@@ -375,8 +317,6 @@ define signext i16 @extractelt_nxv4i16_idx(<vscale x 4 x i16> %v, i32 signext %i
; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: slli a0, a0, 48
-; CHECK-NEXT: srai a0, a0, 48
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x i16> %v, i32 %idx
ret i16 %r
@@ -387,8 +327,6 @@ define signext i16 @extractelt_nxv8i16_0(<vscale x 8 x i16> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 48
-; CHECK-NEXT: srai a0, a0, 48
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x i16> %v, i32 0
ret i16 %r
@@ -400,8 +338,6 @@ define signext i16 @extractelt_nxv8i16_imm(<vscale x 8 x i16> %v) {
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
; CHECK-NEXT: vslidedown.vi v26, v8, 2
; CHECK-NEXT: vmv.x.s a0, v26
-; CHECK-NEXT: slli a0, a0, 48
-; CHECK-NEXT: srai a0, a0, 48
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x i16> %v, i32 2
ret i16 %r
@@ -413,8 +349,6 @@ define signext i16 @extractelt_nxv8i16_idx(<vscale x 8 x i16> %v, i32 signext %i
; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
; CHECK-NEXT: vslidedown.vx v26, v8, a0
; CHECK-NEXT: vmv.x.s a0, v26
-; CHECK-NEXT: slli a0, a0, 48
-; CHECK-NEXT: srai a0, a0, 48
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x i16> %v, i32 %idx
ret i16 %r
@@ -425,8 +359,6 @@ define signext i16 @extractelt_nxv16i16_0(<vscale x 16 x i16> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 48
-; CHECK-NEXT: srai a0, a0, 48
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i16> %v, i32 0
ret i16 %r
@@ -438,8 +370,6 @@ define signext i16 @extractelt_nxv16i16_imm(<vscale x 16 x i16> %v) {
; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu
; CHECK-NEXT: vslidedown.vi v28, v8, 2
; CHECK-NEXT: vmv.x.s a0, v28
-; CHECK-NEXT: slli a0, a0, 48
-; CHECK-NEXT: srai a0, a0, 48
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i16> %v, i32 2
ret i16 %r
@@ -451,8 +381,6 @@ define signext i16 @extractelt_nxv16i16_idx(<vscale x 16 x i16> %v, i32 signext
; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu
; CHECK-NEXT: vslidedown.vx v28, v8, a0
; CHECK-NEXT: vmv.x.s a0, v28
-; CHECK-NEXT: slli a0, a0, 48
-; CHECK-NEXT: srai a0, a0, 48
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i16> %v, i32 %idx
ret i16 %r
@@ -463,8 +391,6 @@ define signext i16 @extractelt_nxv32i16_0(<vscale x 32 x i16> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16,m8,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 48
-; CHECK-NEXT: srai a0, a0, 48
; CHECK-NEXT: ret
%r = extractelement <vscale x 32 x i16> %v, i32 0
ret i16 %r
@@ -476,8 +402,6 @@ define signext i16 @extractelt_nxv32i16_imm(<vscale x 32 x i16> %v) {
; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 48
-; CHECK-NEXT: srai a0, a0, 48
; CHECK-NEXT: ret
%r = extractelement <vscale x 32 x i16> %v, i32 2
ret i16 %r
@@ -489,8 +413,6 @@ define signext i16 @extractelt_nxv32i16_idx(<vscale x 32 x i16> %v, i32 signext
; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: slli a0, a0, 48
-; CHECK-NEXT: srai a0, a0, 48
; CHECK-NEXT: ret
%r = extractelement <vscale x 32 x i16> %v, i32 %idx
ret i16 %r
@@ -501,7 +423,6 @@ define signext i32 @extractelt_nxv1i32_0(<vscale x 1 x i32> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: sext.w a0, a0
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i32> %v, i32 0
ret i32 %r
@@ -513,7 +434,6 @@ define signext i32 @extractelt_nxv1i32_imm(<vscale x 1 x i32> %v) {
; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: sext.w a0, a0
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i32> %v, i32 2
ret i32 %r
@@ -525,7 +445,6 @@ define signext i32 @extractelt_nxv1i32_idx(<vscale x 1 x i32> %v, i32 signext %i
; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: sext.w a0, a0
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i32> %v, i32 %idx
ret i32 %r
@@ -536,7 +455,6 @@ define signext i32 @extractelt_nxv2i32_0(<vscale x 2 x i32> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: sext.w a0, a0
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x i32> %v, i32 0
ret i32 %r
@@ -548,7 +466,6 @@ define signext i32 @extractelt_nxv2i32_imm(<vscale x 2 x i32> %v) {
; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: sext.w a0, a0
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x i32> %v, i32 2
ret i32 %r
@@ -560,7 +477,6 @@ define signext i32 @extractelt_nxv2i32_idx(<vscale x 2 x i32> %v, i32 signext %i
; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: sext.w a0, a0
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x i32> %v, i32 %idx
ret i32 %r
@@ -571,7 +487,6 @@ define signext i32 @extractelt_nxv4i32_0(<vscale x 4 x i32> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: sext.w a0, a0
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x i32> %v, i32 0
ret i32 %r
@@ -583,7 +498,6 @@ define signext i32 @extractelt_nxv4i32_imm(<vscale x 4 x i32> %v) {
; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu
; CHECK-NEXT: vslidedown.vi v26, v8, 2
; CHECK-NEXT: vmv.x.s a0, v26
-; CHECK-NEXT: sext.w a0, a0
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x i32> %v, i32 2
ret i32 %r
@@ -595,7 +509,6 @@ define signext i32 @extractelt_nxv4i32_idx(<vscale x 4 x i32> %v, i32 signext %i
; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu
; CHECK-NEXT: vslidedown.vx v26, v8, a0
; CHECK-NEXT: vmv.x.s a0, v26
-; CHECK-NEXT: sext.w a0, a0
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x i32> %v, i32 %idx
ret i32 %r
@@ -606,7 +519,6 @@ define signext i32 @extractelt_nxv8i32_0(<vscale x 8 x i32> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: sext.w a0, a0
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x i32> %v, i32 0
ret i32 %r
@@ -618,7 +530,6 @@ define signext i32 @extractelt_nxv8i32_imm(<vscale x 8 x i32> %v) {
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
; CHECK-NEXT: vslidedown.vi v28, v8, 2
; CHECK-NEXT: vmv.x.s a0, v28
-; CHECK-NEXT: sext.w a0, a0
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x i32> %v, i32 2
ret i32 %r
@@ -630,7 +541,6 @@ define signext i32 @extractelt_nxv8i32_idx(<vscale x 8 x i32> %v, i32 signext %i
; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
; CHECK-NEXT: vslidedown.vx v28, v8, a0
; CHECK-NEXT: vmv.x.s a0, v28
-; CHECK-NEXT: sext.w a0, a0
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x i32> %v, i32 %idx
ret i32 %r
@@ -641,7 +551,6 @@ define signext i32 @extractelt_nxv16i32_0(<vscale x 16 x i32> %v) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32,m8,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: sext.w a0, a0
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i32> %v, i32 0
ret i32 %r
@@ -653,7 +562,6 @@ define signext i32 @extractelt_nxv16i32_imm(<vscale x 16 x i32> %v) {
; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: sext.w a0, a0
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i32> %v, i32 2
ret i32 %r
@@ -665,7 +573,6 @@ define signext i32 @extractelt_nxv16i32_idx(<vscale x 16 x i32> %v, i32 signext
; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: sext.w a0, a0
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i32> %v, i32 %idx
ret i32 %r